Compare commits

..

No commits in common. "main" and "26.0-alpha" have entirely different histories.

131 changed files with 207 additions and 14660 deletions

View file

@ -1,86 +0,0 @@
name: Build ISO
# Full ISO build is ~5-7 min. Only run on push-to-main and manual
# dispatch so feature-branch iteration stays fast. Docs-only changes
# skip the build — the `paths-ignore` list below covers *.md files,
# docs/, and the website (Hugo source). Anything that touches code,
# the ISO overlay, or the workflow itself still triggers a rebuild.
on:
  push:
    branches: [main]
    paths-ignore:
      - '**/*.md'
      - 'docs/**'
      - 'website/**'
      # NOTE(review): the next two entries are probably already matched by
      # '**/*.md' above — kept in case Forgejo's glob treats root-level
      # files differently. Confirm and drop if redundant.
      - 'CHANGELOG.md'
      - 'RELEASING.md'
  workflow_dispatch:

# One build per ref at a time; a newer push supersedes an in-flight build.
concurrency:
  group: build-iso-${{ github.ref }}
  cancel-in-progress: true

jobs:
  build-iso:
    # Run directly on the runner host, not inside a job container.
    # `build.sh` does `docker run -v $REPO_ROOT:/work archlinux:latest`,
    # and host docker interprets the volume source as a host path — so
    # $REPO_ROOT has to be a path on the host, which it only is when
    # we skip the job-container wrapping. The runner VM has git + docker.
    runs-on: self-hosted
    timeout-minutes: 30
    steps:
      - uses: actions/checkout@v4
      - name: Build ISO
        run: ./iso/build.sh
      - name: Report ISO hash
        run: |
          iso=$(ls iso/out/*.iso | head -1)
          echo "ISO: $iso"
          sha256sum "$iso"
      - name: Upload ISO artifact
        # v4+ isn't supported on Forgejo yet (uses newer @actions/artifact
        # protocol that Forgejo's GHES-compatible API doesn't implement).
        uses: actions/upload-artifact@v3
        with:
          name: furtka-iso
          path: iso/out/*.iso
          retention-days: 14
          if-no-files-found: error
      - name: Cache ISO for smoke-latest
        # Persist the ISO to /data/smoke-cache/latest.iso so the
        # smoke-latest.yml workflow_dispatch job can re-test without
        # rebuilding. /data is already mounted into the runner container
        # at a matching host path.
        run: |
          mkdir -p /data/smoke-cache
          iso=$(ls iso/out/*.iso | head -1)
          cp -f "$iso" /data/smoke-cache/latest.iso
          ls -lh /data/smoke-cache/latest.iso
      - name: Install smoke prerequisites
        # Runner container is Alpine with a near-empty base; smoke-vm.sh
        # needs curl, python3, arp-scan, and sudo (kept so the script
        # also works when invoked from a dev laptop as a non-root user).
        # apk cache survives across jobs so subsequent runs are ~1 s.
        run: apk add --no-cache curl python3 arp-scan sudo
      - name: Smoke-test ISO on Proxmox test host
        # Inlined as a step (rather than a separate job with `needs:`) so
        # we can reuse the ISO that's already in the workspace — Forgejo's
        # actions/download-artifact@v3 hangs on 1.5 GB files.
        # step-level continue-on-error: a VM-side flake doesn't mark the
        # ISO build red, the ISO itself is still valid and uploaded.
        continue-on-error: true
        env:
          PVE_TEST_HOST: ${{ secrets.PVE_TEST_HOST }}
          PVE_TEST_TOKEN: ${{ secrets.PVE_TEST_TOKEN }}
          SMOKE_SHA: ${{ github.sha }}
        run: |
          iso=$(ls iso/out/*.iso | head -1)
          echo "Smoking $iso"
          ./scripts/smoke-vm.sh "$iso"

View file

@ -53,9 +53,7 @@ jobs:
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- name: Check markdown links - name: Check markdown links
# Fully-qualified URL because forgejo's default action mirror uses: lycheeverse/lychee-action@v2
# (data.forgejo.org) doesn't carry lycheeverse/lychee-action.
uses: https://github.com/lycheeverse/lychee-action@v2
with: with:
args: --verbose --no-progress --max-concurrency 4 './**/*.md' args: --verbose --no-progress --max-concurrency 4 './**/*.md'
fail: false fail: false

View file

@ -1,39 +0,0 @@
name: Deploy site
# Auto-deploy the Hugo site to /var/www/furtka.org on push-to-main.
# Only fires when content under website/ changes — everything else
# (Python code, ISO build, runbook docs) is unaffected.
#
# Runs on the self-hosted runner, which is forge-runner-01 — the same
# host that serves furtka.org. So the "deploy" is just a local rsync
# of the Hugo source into /srv/furtka-site and a `hugo` build into
# /var/www/furtka.org. No SSH, no secrets, no cross-host anything.
#
# Requires two bind-mounts on the runner container (/srv/furtka-site
# and /var/www/furtka.org → same paths inside). See compose.yml.
on:
  push:
    branches: [main]
    paths:
      - 'website/**'

# Serialize deploys; a newer push cancels a deploy that is still running.
concurrency:
  group: deploy-site
  cancel-in-progress: true

jobs:
  deploy:
    runs-on: self-hosted
    timeout-minutes: 5
    steps:
      - uses: actions/checkout@v4
      - name: Install hugo + rsync
        # Runner image is alpine-based; apk is fast and cached.
        # Pinning is intentionally skipped — alpine:latest moves hugo
        # forward in lockstep with upstream, and the site only uses
        # baseline features.
        run: apk add --no-cache hugo rsync
      - name: Deploy
        run: ./website/deploy-ci.sh

View file

@ -1,59 +0,0 @@
name: Release
# Tag-triggered: when `git push origin <version>` lands, this builds the
# release tarball + the live-installer ISO, and publishes them both to
# the Forgejo releases page. Boxes POST /api/furtka/update to pull the
# tarball; fresh-install users download the ISO from the release page.
#
# Runs on the self-hosted runner because iso/build.sh needs privileged
# docker access (mkarchiso wants root + loop mounts), and because the
# ubuntu-latest Forgejo hosted runner doesn't carry the docker socket
# bind-mount the build needs. Self-hosted adds ~5-7 min to the release
# (ISO build) but keeps the release page self-contained.
#
# Version tags only (CalVer like 26.0-alpha, 26.1, 27.0-beta). Random
# tags are ignored by the [0-9]* prefix.
on:
  push:
    tags: ['[0-9]*']

jobs:
  release:
    runs-on: self-hosted
    timeout-minutes: 45
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0  # changelog section extraction needs history
      - name: Install prerequisites
        # Alpine runner is near-empty: we need curl + python3 for the
        # publish script, bash for the build scripts.
        run: apk add --no-cache curl python3 bash
      - name: Build release tarball
        run: ./scripts/build-release-tarball.sh "${GITHUB_REF_NAME}"
      - name: Build live-installer ISO
        # Same script build-iso.yml uses on every main push. Re-running
        # here is intentional: guarantees the ISO matches the exact
        # tagged commit without coordinating across workflows. Step-level
        # continue-on-error so an ISO build flake doesn't block the
        # core tarball (which is what boxes need for self-update) from
        # publishing.
        continue-on-error: true
        id: build_iso
        run: ./iso/build.sh
      - name: Move ISO into dist/
        # publish-release.sh attaches dist/furtka-<ver>.iso if present.
        # Skipped gracefully when the build step above failed.
        # (`outcome` is the pre-continue-on-error result, so it really is
        # 'failure' when build.sh failed even though the job kept going.)
        if: steps.build_iso.outcome == 'success'
        run: |
          iso=$(ls iso/out/*.iso | head -1)
          cp "$iso" "dist/furtka-${GITHUB_REF_NAME}.iso"
      - name: Publish to Forgejo releases
        env:
          FORGEJO_TOKEN: ${{ secrets.FORGEJO_RELEASE_TOKEN }}
        run: ./scripts/publish-release.sh "${GITHUB_REF_NAME}"

View file

@ -1,47 +0,0 @@
name: Smoke latest ISO
# Manual-trigger smoke test against the last ISO `build-iso.yml` produced.
# Use this when you've changed something that only affects smoke-vm.sh,
# the PVE setup, or the secrets — skips the 25-min ISO rebuild and only
# runs the ~2-min VM boot + /:5000 check.
#
# The ISO lives at /data/smoke-cache/latest.iso on the runner, populated
# by build-iso.yml's "Cache ISO for smoke-latest" step. That path is
# inside the runner's already-mounted /data volume, so no extra bind
# mounts needed.
on:
  workflow_dispatch:

# Never run two smokes at once (they'd fight over the test VM), and
# don't cancel an in-flight one — let it finish.
concurrency:
  group: smoke-latest
  cancel-in-progress: false

jobs:
  smoke:
    runs-on: self-hosted
    timeout-minutes: 10
    steps:
      - uses: actions/checkout@v4
      - name: Check cached ISO exists
        run: |
          iso=/data/smoke-cache/latest.iso
          if [ ! -f "$iso" ]; then
            echo "::error::$iso not found — trigger build-iso.yml first to populate the cache."
            exit 1
          fi
          echo "Will smoke: $iso"
          ls -lh "$iso"
      - name: Install smoke prerequisites
        # Runner container is Alpine with a near-empty base; smoke-vm.sh
        # needs curl, python3, arp-scan, and sudo (kept so the script
        # also works when invoked from a dev laptop as a non-root user).
        run: apk add --no-cache curl python3 arp-scan sudo
      - name: Smoke-test ISO on Proxmox test host
        env:
          PVE_TEST_HOST: ${{ secrets.PVE_TEST_HOST }}
          PVE_TEST_TOKEN: ${{ secrets.PVE_TEST_TOKEN }}
          SMOKE_SHA: ${{ github.sha }}
        run: ./scripts/smoke-vm.sh /data/smoke-cache/latest.iso

7
.gitignore vendored
View file

@ -7,10 +7,3 @@ __pycache__/
# Real credentials must never be committed — use the .example files # Real credentials must never be committed — use the .example files
archinstall/user_credentials.json archinstall/user_credentials.json
iso/out/
# Hugo website
website/public/
website/resources/
website/.hugo_build.lock
website/hugo_stats.json

View file

@ -1,335 +1,12 @@
# Changelog # Changelog
All notable changes to Furtka will be documented in this file. All notable changes to Homebase will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
This project uses calendar versioning: `YY.N-stage` (e.g. `26.0-alpha` = 2026, release 0, alpha stage). This project uses calendar versioning: `YY.N-stage` (e.g. `26.0-alpha` = 2026, release 0, alpha stage).
## [Unreleased] ## [Unreleased]
## [26.15-alpha] - 2026-04-21
### Fixed
- **HTTPS is now opt-in; fresh installs no longer hit unbypassable
SEC_ERROR_BAD_SIGNATURE.** Every version since 26.5 shipped a
Caddyfile with a `__FURTKA_HOSTNAME__.local { tls internal }` site
block, so Caddy auto-generated a self-signed root CA + intermediate
+ leaf on first boot. That worked for first-time-ever users, but
every reinstall (or second Furtka box on the same LAN) produced a
new CA with the **same intermediate CN** (`Caddy Local Authority -
ECC Intermediate` — Caddy hardcodes it). Any browser that had ever
trusted an earlier Furtka CA got a cached intermediate with
mismatched keys, then Firefox's cert lookup substituted the cached
intermediate when validating the new box's leaf → the signature
check failed → `SEC_ERROR_BAD_SIGNATURE`, which Firefox has no
"Advanced → Accept Risk" bypass for.
- Removed the hostname site block from the default Caddyfile.
Fresh installs serve `:80` only; visiting `https://furtka.local`
now yields a clean connection-refused instead of the crypto
fault.
- Added top-level `import /etc/caddy/furtka-https.d/*.caddyfile`.
The `/settings` HTTPS toggle (via `furtka.https.set_force_https`)
now writes TWO snippets atomically — the top-level hostname +
`tls internal` block (enables `:443`) and the `:80`-scoped
redirect (forces HTTP → HTTPS) — and removes both on disable.
Caddy reloads after the pair-swap; failure rolls both back.
- Webinstaller creates `/etc/caddy/furtka-https.d/` during
post-install alongside the existing `furtka.d/`.
- `updater._refresh_caddyfile` runs a 26.14 → 26.15 migration: if
the box already had the redirect snippet on disk (user had
explicitly enabled "Force HTTPS" under the old regime), the
migration also writes the new listener snippet so HTTPS keeps
working across the upgrade.
- **`status.force_https` now reads the listener snippet, not the
redirect snippet.** A lone redirect without a `:443` listener
wouldn't actually serve HTTPS, so the listener file is the
authoritative "HTTPS is on" signal. The UI on `/settings` sees the
correct state as a result.
Known remaining UX wart: a browser that trusted a previous Furtka box
still sees `BAD_SIGNATURE` when visiting this box's `https://` after
enabling HTTPS here — the fixed intermediate CN is a Caddy-side
limitation we can't fix from Furtka. Fresh installs on a browser that
never visited another Furtka box work correctly. Workaround:
`about:networking#sts` → Forget → clear `cert9.db`.
## [26.14-alpha] - 2026-04-21
### Fixed
- **Landing page and `/settings/` were silently bypassing the auth
guard.** Since 26.11 shipped login, the Caddyfile only
reverse-proxied `/api/*`, `/apps*`, `/login*`, and `/logout*` to
Python. Everything else — including `/` and `/settings/` — fell
through to Caddy's catch-all `file_server` and was served straight
from `assets/www/` without ever hitting the session check. The
effect: a LAN visitor saw the box's hostname, IP, Furtka version,
and the buttons for Update-now / Reboot / HTTPS-toggle. The API
calls those buttons fired were all 401-auth-gated so actions didn't
land, but the information leak and the "looks open" UX was a real
bug. Caught in the 26.13 SSH test session when the user noticed
Logout only showed up on `/apps`. Now Caddy routes `/` and
`/settings*` through Python; a new `_serve_static_www` handler
checks the session cookie, redirects to `/login` if unauthed, and
reads the HTML from `assets/www/` otherwise. Catch-all still
serves `/style.css`, `/rootCA.crt`, and the runtime JSON files
publicly — those don't need auth.
- **Logout link now shows on every authed page, not just `/apps`.**
The static HTML for `/` and `/settings/` maintained their own nav
separate from `_HTML` in `api.py`, so they never got the Logout
entry when it was added in 26.11. Both nav bars now include it
plus an inline `doLogout()` that POSTs `/logout` and bounces to
`/login`, matching the pattern in `_HTML`.
## [26.13-alpha] - 2026-04-21
### Fixed
- **Upgrade path from pre-auth releases actually works.** 26.11-alpha
introduced `from werkzeug.security import ...` in `furtka/auth.py`,
but werkzeug isn't installed on the target system — core runs as
system Python with stdlib only, and `flask>=3.0` in `pyproject.toml`
is never pip-installed on the box. Fresh boxes from the 26.11/26.12
ISO without a manually-installed werkzeug crashed on import; boxes
upgrading from pre-26.11 got double-broken by that plus the health
check below. Replaced the werkzeug dependency with a stdlib-only
`furtka/passwd.py` that uses `hashlib.pbkdf2_hmac` for new hashes
and parses werkzeug's `scrypt:N:r:p$salt$hex` format for backward
compatibility — existing `users.json` files created on the rare
boxes that did have werkzeug keep working after this upgrade, no
re-setup needed. `from werkzeug.security import ...` is gone from
the import chain entirely; `pyproject.toml`'s flask dep stays only
for the live-ISO webinstaller.
- **Self-update no longer auto-rolls-back when crossing the auth
boundary.** `updater._health_check` pinged `/api/apps` and demanded
a 200, which meant every 26.10 → 26.11+ upgrade hit the post-restart
check, got a 401 (auth guard), and treated that as "server dead"
→ rollback. Now any 2xx4xx response counts as "server alive"; only
connection-level failures or 5xx fail the check. 5xx still fails
rollback because that means the new process is up but broken.
- **Install lock closes its race window.** `POST /api/apps/install`
used to release the fcntl lock immediately after the sync
pre-validation so the systemd-run child could re-acquire it —
leaving a tiny gap where a second POST could slip in, pass the lock
check, and return 202. Both child processes would start, one would
win the in-child lock, the other would die silently. Now the API
also reads `install-state.json` and refuses with 409 if the stage
is non-terminal (`pulling_image`, `creating_volumes`,
`starting_container`). The fcntl lock stays as belt-and-suspenders.
## [26.12-alpha] - 2026-04-21
### Changed
- **App-Install geht async mit Live-Progress.** `POST /api/apps/install`
returnt jetzt `202 Accepted` nach der synchronen Pre-Validation
(Source auflösen, Files kopieren, `.env` schreiben, Placeholder- und
Path-Checks). Den eigentlichen Docker-Teil (`compose pull` → volumes
`compose up`) dispatched der Handler als `systemd-run
--unit=furtka-install-<app>` Hintergrund-Job, der seine Phase in
`/var/lib/furtka/install-state.json` schreibt. Neues
`GET /api/apps/install/status` für UI-Polling. Das Install-Modal
zeigt jetzt live "Image wird heruntergeladen…" →
"Speicherbereiche werden erstellt…" → "Container wird gestartet…"
statt ~30 Sekunden totem "Installing…". Muster 1:1 parallel zu
`/api/catalog/sync/apply` und `/api/furtka/update/apply`. Neue CLI-
Subcommand `furtka app install-bg <name>` (intern, von der API
aufgerufen); `furtka app install` für Terminal-User bleibt synchron.
Die Reinstall-Taste in der App-Liste pollt ebenfalls den
Install-Status und spiegelt die Phase im Button-Text.
## [26.11-alpha] - 2026-04-21
### Added
- **Login-auth for the Furtka web UI.** Every `/apps`, `/api/*`, `/`,
and `/settings/` route now requires a signed-in session. New
`/login` page serves a username/password form; `POST /login`
validates against `/var/lib/furtka/users.json` (werkzeug PBKDF2-
hashed), sets a `furtka_session` cookie (`HttpOnly`, `SameSite=
Strict`, 7-day TTL), and redirects to `/apps`. `POST /logout`
revokes the server-side session and clears the cookie.
Unauthenticated HTML requests get a 302 to `/login`; unauthenticated
API requests get 401 JSON. The old "No authentication on this UI
yet" banner is gone; the `/apps` header picks up a `Logout` link
instead.
- **First-run setup fallback for upgrade-path boxes.** Boxes
upgrading from 26.10-alpha have no `users.json` yet — on the first
visit `/login` renders a setup form (username + password +
password-confirm) that creates the admin record on submit. Fresh
installs skip this: the webinstaller writes `users.json` during
the chroot post-install step using the step-1 password, so the
first browser visit after boot goes straight to the login form.
- **Caddy proxy routes `/login` and `/logout`.** `assets/Caddyfile`
gets two new `handle` blocks in the shared `(furtka_routes)`
snippet so both the `:80` block and the `hostname.local, hostname`
HTTPS block forward the auth endpoints to the stdlib server on
`127.0.0.1:7000`. Without this Caddy would serve a 404 from the
static file server.
### Fixed
- `tests/test_installer.py` ruff-format nit — the 26.10-alpha
release commit had a misformatted list literal that failed
`ruff format --check`. Caught when the Release page on Forgejo
showed a red CI badge for the tag.
- `pyproject.toml` version string bumped from the stale 26.8-alpha
to 26.11-alpha. Release pipeline uses `GITHUB_REF_NAME` as source
of truth for the artefact name, but having the two agree matters
for local dev runs that read `pyproject.toml`.
## [26.10-alpha] - 2026-04-21
### Added
- **Remove-USB-stick hint on the installer's post-install screen.**
`webinstaller/templates/install/rebooting.html` now shows a bold
"Remove the USB stick now" line before the reboot, plus a muted
fallback explaining the BIOS boot-menu keys (F11/F12/Esc) if the
machine boots back into the installer anyway. Caught on the first
bare-metal test (Medion i5-4gen, 2026-04-21) where the box didn't
boot the installed system without manual BIOS-order changes.
- **New `path` setting type for app manifests.** Apps can now declare a
setting with `"type": "path"` whose value is an absolute filesystem
path on the host; docker-compose bind-mounts it via the usual `.env`
substitution (`${MEDIA_PATH}:/media`). Unlocks media/data-heavy apps
(Jellyfin, later Paperless/Nextcloud/Immich) where the user points at
an existing folder instead of copying everything into a Docker
volume. The install form renders path settings as a plain text input
with a `/mnt/…` placeholder hint.
- **Server-side path validation.** Both `install_from()` and
`update_env()` refuse values that aren't absolute, don't exist,
aren't directories, or resolve (after `Path.resolve()`) into a
system-path deny-list (`/`, `/etc`, `/root`, `/boot`, `/proc`,
`/sys`, `/dev`, `/bin`, `/sbin`, `/usr/bin`, `/usr/sbin`,
`/var/lib/furtka`). Catches `/mnt/../etc`-style traversal too. Error
messages surface in the existing install/edit modal error line.
## [26.9-alpha] - 2026-04-21
### Fixed
- Landing-page app tiles with an `open_url` now open in a new tab
(`target="_blank" rel="noopener"`), matching the Open button
behaviour on `/apps`. Without this, clicking "Uptime Kuma" on the
home screen replaced Furtka itself with the Kuma admin page.
Internal links (the `Manage →` fallback for apps without an
`open_url`) still open in the same tab.
- `scripts/publish-release.sh` no longer fails the whole release when
the ISO upload hits a Forgejo proxy 504. The core tarball + sha256 +
release.json (which running boxes need for self-update) are uploaded
first and the ISO is attempted last as a best-effort; a 504 now logs
a warning and exits 0 so the release page still publishes. Surfaced
by the 26.8-alpha cut: the tarball landed but the ~1 GB ISO upload
timed out at the Forgejo reverse proxy.
### Changed
- `furtka app list --json` now mirrors `/api/apps` field-for-field —
previously the CLI emitted a slim projection missing
`description_long`, `open_url`, and `settings`. Anyone piping the
CLI output into jq for automation was seeing an incomplete view.
## [26.8-alpha] - 2026-04-20
### Added
- **Live-installer ISO attached to the Forgejo release page.** `.forgejo/workflows/release.yml` moves to the self-hosted runner, builds both the self-update tarball and the ISO, and `scripts/publish-release.sh` uploads the ISO as a fourth release asset (`furtka-<version>.iso`) alongside the existing tarball + sha256 + release.json. Fresh-install users can now grab the ISO from the release page instead of hunting through `build-iso.yml` artifact retention windows. ISO build step is `continue-on-error` so an ISO flake doesn't hold back the core tarball that running boxes need for self-update.
- **Reboot + Shut down buttons on `/settings`.** Replaces the two "Coming next" placeholders with real actions backed by `POST /api/furtka/power` (`{"action": "reboot" | "poweroff"}`). Handler kicks a delayed `systemd-run --on-active=3s systemctl {reboot|poweroff}` so the HTTP response reaches the browser before the kernel loses network. Each button opens a native confirm dialog first (reboot: "back in ~30 s", shut down: "need to press the physical power button"), then the UI swaps to a status line and — after a reboot — polls `/furtka.json` until the box is back, reloading the page automatically. No auth (same posture as install/remove).
- **Manifest `open_url` field + Open button in `/apps` and on the landing page.** Apps declare a URL template (e.g. `smb://{host}/files` for fileshare, `http://{host}:3001/` for Uptime Kuma); the UI substitutes `{host}` with the current browser's hostname at render time so the link follows however the user reached Furtka (furtka.local, raw IP, a future reverse-proxy hostname). The landing page's hardcoded `if app.name === 'fileshare'` special-case is gone — any app with an `open_url` in its manifest now gets a proper "Open" link. The core seed `apps/fileshare/manifest.json` bumps to v0.1.2 to carry it.
### Changed
- `.btn` CSS class introduced so an `<a>` rendered-as-button lines up with its `<button>` siblings in `.buttons`. Needed because "Open" is a real link (middle-click, copy URL, screen readers) and HTML doesn't let `<button>` carry `href`.
### Notes
- `26.7-alpha` was tagged but never published — the tag push didn't trigger `release.yml` (Forgejo race with the concurrent main push). `26.8-alpha` supersedes it and carries the same content plus power actions.
## [26.6-alpha] - 2026-04-20
### Added
- **Apps catalog synced independently of core.** A new `daniel/furtka-apps` Forgejo repo carries the bundled app catalog; running boxes pull the latest release via `furtka-catalog-sync.timer` (10 min post-boot + daily, ±6 h jitter) and extract atomically into `/var/lib/furtka/catalog/`. The resolver now prefers catalog apps over the seed `/opt/furtka/current/apps/` tree that ships inside the core release tarball, so apps can update without cutting a Furtka core release. Manual trigger: "Sync apps catalog" button on `/apps`, or `sudo furtka catalog sync` at the console. Fresh boxes with no network fall back to the seed, so offline first-boot still shows installable apps. Installed apps are never auto-swapped — users click Reinstall in `/apps` to move an existing install onto a newer catalog version (settings merge-preserved via the existing `installer.install_from` path).
- **Catalog CLI**: `furtka catalog sync [--check] [--json]` + `furtka catalog status [--json]`. Same shape as the core `furtka update` commands.
- **Catalog API endpoints**: `POST /api/catalog/sync/check`, `POST /api/catalog/sync/apply` (detached via `systemd-run` for symmetry with `/api/furtka/update/apply`), `GET /api/catalog/status`. The existing `/api/bundled` endpoint keeps working as a backwards-compat alias for `/api/apps/available`, which now returns the union of catalog + seed apps with a new `"source"` field on each entry (`"catalog"` | `"bundled"`).
### Changed
- **`furtka._release_common`** extracted from `furtka.updater`. Both `updater` and the new `catalog` module now share one implementation of the Forgejo-releases-API call, SHA256 verification, path-traversal-guarded tarball extraction, and CalVer comparison. Public updater surface unchanged.
- **`_link_new_units` now auto-enables newly-linked `.timer` units.** On self-update, a fresh timer file (e.g. `furtka-catalog-sync.timer` added in this release) needs `systemctl enable` to actually start firing — linking alone isn't enough. Fresh installs get their enable via the webinstaller's `_FURTKA_UNITS` list as before.
### Fixed
- **SHA-256 CA fingerprint no longer overflows the `/settings` Local HTTPS card** on narrow viewports. `.kv dd` grid items now set `min-width: 0` + `overflow-wrap: anywhere` so the colon-separated hex string breaks within the card's right edge instead of pushing past it.
## [26.5-alpha] - 2026-04-20
### Fixed
- **HTTPS handshake regression on the installed box (#10).** Phase 1 shipped two linked bugs: the `:443 { tls internal }` site block had no hostname, so Caddy never issued a leaf cert and every SNI handshake died with `SSL_ERROR_INTERNAL_ERROR_ALERT`; and both `furtka.https` and the Caddyfile's `/rootCA.crt` handler referenced `/var/lib/caddy/.local/share/caddy/pki/…`, a path that doesn't exist because our systemd unit sets `XDG_DATA_HOME=/var/lib`. Force-HTTPS toggle made the brokenness user-visible by redirecting working HTTP to dead HTTPS. Fixed: the Caddyfile now ships a `__FURTKA_HOSTNAME__.local, __FURTKA_HOSTNAME__ { tls internal }` block with the placeholder substituted at install time (`webinstaller/app.py`) and on every self-update (`furtka.updater._refresh_caddyfile` reads `/etc/hostname`). `auto_https disable_redirects` keeps Caddy's built-in redirect out of the way of the `/settings` toggle. PKI paths corrected in both `furtka/https.py` and `assets/Caddyfile`. Verified end-to-end on the 192.168.178.110 test VM: TLS 1.3 handshake completes, leaf cert issued, `/rootCA.crt` returns 200.
### Changed
- **Wizard footer version is now dynamic.** `webinstaller/app.py` resolves the Furtka version at startup via a Flask context processor — reads `/opt/furtka/VERSION` on the live ISO (written by `iso/build.sh` from `pyproject.toml` at build time), falls back to `pyproject.toml` in dev runs, then to literal `"dev"`. The 26.4 footer was hand-pinned and drifted within hours of release; that follow-up item is now closed.
- **Docs realigned with 26.4-alpha reality.** `apps/README.md` added (manifest schema, volume namespacing, `.env.example` guardrails, SVG sanitiser limits, install/test flow). Root `README.md` roadmap updated with Phase 1 HTTPS + smoke-VM pipeline as shipped items and 26.4-alpha in the release list. `iso/README.md` corrected: mDNS is wired (not "later milestone"), post-install default URL is `http://furtka.local` (not `proksi.local`), HTTPS is available via `tls internal` since 26.4. `website/README.md` now documents the auto-deploy on push-to-main as the default path, manual `deploy.sh` as the SSH-hop fallback.
## [26.4-alpha] - 2026-04-18
### Added
- **Local HTTPS via Caddy `tls internal`** on port 443. Caddy generates a per-box local root CA on first start; the Caddyfile now serves both `:80` and `:443` from the same routes. HTTP stays on by default — no regression for users who haven't trusted the CA yet. New "Local HTTPS" section in `/settings` shows the CA's SHA-256 fingerprint, offers a one-click download of `rootCA.crt`, links to the per-OS install guide at `/https-install/`, and exposes an opt-in "force HTTPS" toggle that only unhides itself once the current browser has already trusted the cert (so enabling it can't lock the user out of the settings page). Backend: `GET /api/furtka/https/status` and `POST /api/furtka/https/force` in `furtka.https`. The force toggle drops a Caddy import snippet into `/etc/caddy/furtka.d/redirect.caddyfile` and reloads Caddy; reload failure automatically rolls the snippet state back so a bad config can't wedge the next service start.
- **Impressum + Datenschutzerklärung on furtka.org** (both DE and EN) covering §5 DDG and Art. 13 GDPR. Linked from the site footer on every page; bilingual with DE as the legally binding version.
- **Auto-deploy of furtka.org on push-to-main.** New `.forgejo/workflows/deploy-site.yml` runs on the self-hosted runner (which *is* forge-runner-01 — the webserver host), so the deploy is just a local rsync + `hugo --minify` into `/var/www/furtka.org/`. No SSH, no secrets. Manual `website/deploy.sh` remains for out-of-band deploys.
- **Post-build smoke VM on Proxmox test host 192.168.178.165.** Every `build-iso` run boots the freshly built ISO in a throwaway VM on pollux (8 GiB RAM / 2 vCPU — the 4 GB default OOM-ed the host during mkinitcpio), then curls `:5000` to confirm the webinstaller is alive. VMs in VMID range 9000–9099 tagged with the commit SHA; last 5 kept for post-mortem debugging. Optional `workflow_dispatch` "Smoke latest ISO" re-tests the cached ISO in ~2 min without rebuilding. Step-level `continue-on-error` means a VM-side flake doesn't mark the ISO build red.
### Fixed
- **Settings page "Installed" field now refreshes after a self-update.** The `/api/furtka/update/check` response already carries `current` — the settings JS now drives `upd-current` from it the same way it drives `upd-latest`, so clicking "Check for updates" after a successful update reflects the new installed version without a force-reload.
- **Auto-reload on update completion is now reliable.** Clicking "Update now" arms a 45 s fallback `setTimeout(location.reload)` in addition to the existing `/update-state.json` polling loop. If the mid-apply API restart drops the poll connection before `stage: done` is ever observed (as seen on the 2026-04-16 VM test), the fallback still brings the page up on the new version. The fallback is cleared on `done` (5 s reload wins) or `rolled_back` (user needs the error visible).
- **Version string in the webinstaller footer** was pinned at `26.0-alpha` and didn't track releases. Bumped to `26.4-alpha` for this release; follow-up will make it render from `pyproject.toml` dynamically.
## [26.3-alpha] - 2026-04-16
### Fixed
- **Release workflow no longer depends on `jq`.** The previous `apt-get install -y jq` step hung on a slow mirror for 15+ minutes and stalled the 26.2-alpha publish. `publish-release.sh` now assembles the release-create payload via a tiny `python3 -c` block — Python is always available on the Forgejo Actions runner. `apt-get` path removed entirely.
## [26.2-alpha] - 2026-04-16
### Fixed
- **Updater "Check for updates" no longer 404s when every release is a pre-release.** `check_update()` queried Forgejo's `/releases/latest`, which silently excludes pre-releases (anything tagged `-alpha`/`-beta`/`-rc`) and returns 404 when there is no stable release. Switched to `/releases?limit=1`, which Forgejo sorts newest-first across all release kinds. During the alpha stage where every tag is a pre-release this is the only thing that works; once we tag a stable release, the same query still picks it up.
## [26.1-alpha] - 2026-04-16
### Added
- **Furtka self-update** (Phase 2). Tagging a release on main fires `.forgejo/workflows/release.yml`, which packages `furtka/` + `apps/` + a root-level `VERSION` file as `furtka-<tag>.tar.gz`, uploads it plus a `.sha256` + `release.json` to the Forgejo releases page, and makes the release available to running boxes. New CLI: `furtka update [--check]` + `furtka rollback`. New endpoints: `POST /api/furtka/update/check` + `/apply` + `GET /api/furtka/update/status`. UI: "Furtka updates" card on `/settings` shows installed vs latest, Update button runs the apply flow detached via `systemd-run`, progress polls `/update-state.json` served by Caddy so the mid-update API restart doesn't interrupt reporting. Atomic `/opt/furtka/current` symlink flip, auto-rollback on health-check failure post-restart, SHA256-verified downloads.
- **Per-app container image updates** (Phase 1). `POST /api/apps/<name>/update` runs `docker compose pull`, compares the running container's image digest to the just-pulled local image digest per service, and only restarts containers whose image actually changed. Update button on each installed-app row in `/apps`. Keeps `image: :latest` pins simple — no compose-file mutations.
- **Per-version install layout** on `/opt/furtka/`. Install now extracts the resource-manager payload to `/opt/furtka/versions/<VERSION>/` and creates `/opt/furtka/current` as an atomic symlink; updates flip the symlink in place and `systemctl link` every unit from the shipped `assets/systemd/` tree. Runtime JSON (`status.json`, `furtka.json`, `update-state.json`) moved to `/var/lib/furtka/` so self-updates never clobber it.
- **On-box UI uplevel** across three pages sharing one design system (`/style.css` served by Caddy). Redesigned landing page with a "Your apps" tile grid driven by `/api/apps`, a `fileshare` app tile that deep-links to `smb://<host>.local/files`, status tiles, and subtle "Coming next" links to `furtka.org`. `/apps` page renders real app icons inlined from each manifest's `icon.svg` (defensive SVG sanitiser — strips script/on*/javascript: content, 16 KB cap). New `/settings` page with About-this-box, Appearance, Furtka-updates, and Coming-next sections. Persistent top nav (Jakob's Law) on every page. Light-mode support via `prefers-color-scheme`.
- **Webinstaller step 2 (boot drive)** now shows size / type / health chips plus a "Recommended" badge on the auto-selected drive instead of a raw numeric score.
- **Forgejo branch protection on `main`** — no direct pushes except owner-whitelisted, required status checks (`CI / lint*`, `CI / test*`, `CI / validate-json*`), applied via the idempotent `ops/forgejo/apply-branch-protection.sh` script.
- **In-browser app settings**, so users no longer need SSH + `vim` to configure an app before first install. Manifest gains optional `settings` (name/label/description/type/required/default) and `description_long` fields. Installing a bundled app opens a form rendered from the manifest; installed apps grow a "Settings" button that edits merged values (password fields blank = keep current). API: `POST /api/apps/install` now accepts a `settings` object in the JSON body; new `GET`/`POST /api/apps/<name>/settings` for inspecting and updating an installed app. Password values never leave the server.
- `nano` added to the installer package list so users have a beginner-friendly editor at the console/SSH (was `vim`-only, which `command not found`'d under archinstall 4.x because it was actually missing from the package set too).
- `openssh` added explicitly to the installer package list and `sshd` added to enabled services. `archinstall: true` in archinstall 4.x did not actually install openssh-server, so the documented recovery path (SSH → edit `.env`) silently failed.
- **Forgejo Actions runner** live on Proxmox VM (`forge-runner-01`, Ubuntu 24.04) with DinD sidecar — CI green end-to-end. Setup scripts in `ops/forgejo-runner/`.
- **Walking-skeleton live ISO** (`iso/build.sh`). Overlays an Arch `releng` profile with Flask + the webinstaller, bakes a systemd unit that auto-starts the wizard on boot, produces a hybrid BIOS/UEFI ISO via `mkarchiso` in a privileged `archlinux:latest` container. Tested booting under OVMF in Proxmox — wizard screens 1–3 respond at `http://<vm-ip>:5000`.
- **Public website at [furtka.org](https://furtka.org)** (`website/`). Hugo static site, English + German, served from `/var/www/furtka.org` on `forge-runner-01` via nginx. Upstream openresty proxy handles TLS. Intentionally minimal single-page copy while the project is pre-alpha. Deploy is `./website/deploy.sh` (rsync + remote Hugo build); one-time VM setup in `ops/nginx/setup-vm.sh`.
### Changed
- Every on-box asset (landing page, settings page, style.css, status/welcome scripts, systemd units, Caddyfile) moved from inline Python string constants in `webinstaller/app.py` into real files under `furtka/assets/`. The installer reads them from disk at install time; the self-updater ships them in the release tarball.
- Settings-button label went from "Einstellungen" (prototyping leftover) to "Settings" — rest of the UI chrome is English.
- Keyboard layout at the TTY now follows the chosen installer language (`de` → `de`, `pl` → `pl`, `en` → `us`) instead of hardcoding `us`. Previously German users couldn't type `/`, `-`, or `=` at the recovery console.
- `fileshare` app: `description_long` + `settings` (SMB_USER, SMB_PASSWORD) for the new settings form. Docker-level healthcheck from `dperson/samba` is disabled in the compose override — it timed out under normal operation and marked a working share "unhealthy" in `docker ps`.
- **Project name finalized: Furtka.** Working title "Homebase" retired. Domain `furtka.org` registered via Strato 2026-04-13.
- Managed gateway NS hostnames updated from `ns1.homebase.cloud` / `ns2.homebase.cloud` to `ns1.furtka.org` / `ns2.furtka.org`.
- Python package renamed from `homebase` → `furtka` in `pyproject.toml`.
## [26.0-alpha] - 2026-04-13 ## [26.0-alpha] - 2026-04-13
First tagged snapshot. Pre-alpha — the installer does not yet boot, but the design is locked and the prototype components are shaped. First tagged snapshot. Pre-alpha — the installer does not yet boot, but the design is locked and the prototype components are shaped.
@ -348,25 +25,11 @@ First tagged snapshot. Pre-alpha — the installer does not yet boot, but the de
- **Reverse proxy:** Caddy (auto Let's Encrypt, simplest config) - **Reverse proxy:** Caddy (auto Let's Encrypt, simplest config)
- **Identity provider:** Authentik (bundled SSO, every app auto-wired at install) - **Identity provider:** Authentik (bundled SSO, every app auto-wired at install)
- **Managed gateway DNS:** NS delegation to `ns1.furtka.org` / `ns2.furtka.org` (wildcard cert via Let's Encrypt DNS-01) - **Managed gateway DNS:** NS delegation to `ns1.homebase.cloud` / `ns2.homebase.cloud` (wildcard cert via Let's Encrypt DNS-01)
- **Local HTTPS:** Local CA installed by user once (no browser warnings on `*.proksi.local`) - **Local HTTPS:** Local CA installed by user once (no browser warnings on `*.proksi.local`)
- **Base OS:** Arch (rolling, Debian remains fallback) - **Base OS:** Arch (rolling, Debian remains fallback)
- **Containers:** Docker + Compose - **Containers:** Docker + Compose
- **License:** AGPL-3.0 - **License:** AGPL-3.0
[Unreleased]: https://forgejo.sourcegate.online/daniel/furtka/compare/26.15-alpha...HEAD [Unreleased]: https://forgejo.sourcegate.online/daniel/homebase/compare/26.0-alpha...HEAD
[26.15-alpha]: https://forgejo.sourcegate.online/daniel/furtka/releases/tag/26.15-alpha [26.0-alpha]: https://forgejo.sourcegate.online/daniel/homebase/releases/tag/26.0-alpha
[26.14-alpha]: https://forgejo.sourcegate.online/daniel/furtka/releases/tag/26.14-alpha
[26.13-alpha]: https://forgejo.sourcegate.online/daniel/furtka/releases/tag/26.13-alpha
[26.12-alpha]: https://forgejo.sourcegate.online/daniel/furtka/releases/tag/26.12-alpha
[26.11-alpha]: https://forgejo.sourcegate.online/daniel/furtka/releases/tag/26.11-alpha
[26.10-alpha]: https://forgejo.sourcegate.online/daniel/furtka/releases/tag/26.10-alpha
[26.9-alpha]: https://forgejo.sourcegate.online/daniel/furtka/releases/tag/26.9-alpha
[26.8-alpha]: https://forgejo.sourcegate.online/daniel/furtka/releases/tag/26.8-alpha
[26.6-alpha]: https://forgejo.sourcegate.online/daniel/furtka/releases/tag/26.6-alpha
[26.5-alpha]: https://forgejo.sourcegate.online/daniel/furtka/releases/tag/26.5-alpha
[26.4-alpha]: https://forgejo.sourcegate.online/daniel/furtka/releases/tag/26.4-alpha
[26.3-alpha]: https://forgejo.sourcegate.online/daniel/furtka/releases/tag/26.3-alpha
[26.2-alpha]: https://forgejo.sourcegate.online/daniel/furtka/releases/tag/26.2-alpha
[26.1-alpha]: https://forgejo.sourcegate.online/daniel/furtka/releases/tag/26.1-alpha
[26.0-alpha]: https://forgejo.sourcegate.online/daniel/furtka/releases/tag/26.0-alpha

View file

@ -1,12 +1,12 @@
# Contributing to Furtka # Contributing to Homebase
Two-person project right now (Daniel + Robert). This doc exists so the conventions don't get lost as the team grows. Two-person project right now (Daniel + Robert). This doc exists so the conventions don't get lost as the team grows.
## Dev setup ## Dev setup
```bash ```bash
git clone https://forgejo.sourcegate.online/daniel/furtka.git git clone https://forgejo.sourcegate.online/daniel/homebase.git
cd furtka cd homebase
python -m venv .venv python -m venv .venv
source .venv/bin/activate source .venv/bin/activate

View file

@ -219,7 +219,7 @@ If you develop a new program, and you want it to be of the greatest possible use
To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.
furtka homebase
Copyright (C) 2026 daniel Copyright (C) 2026 daniel
This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.

View file

@ -1,8 +1,8 @@
# Furtka # Homebase
**Open-source home server OS — simple enough for everyone.** · [furtka.org](https://furtka.org) **Open-source home server OS — simple enough for everyone.**
> "Furtka" is Polish for *gate* — a play on the gateway concept (reverse proxy + DNS as your home's front door). > Working title: **Homebase**. Robert's internal codename: **Furtka** (Polish for "gate" — plays on the gateway concept). FurtkaOS is also in the running.
Turn any x86 PC into a powerful, self-hosted home server with an app-store experience. No terminal skills required. Turn any x86 PC into a powerful, self-hosted home server with an app-store experience. No terminal skills required.
@ -10,7 +10,7 @@ Turn any x86 PC into a powerful, self-hosted home server with an app-store exper
People are tired of big companies knowing everything about them. Synology NAS comes close to solving this, but it's expensive and still too complicated for most people. People are tired of big companies knowing everything about them. Synology NAS comes close to solving this, but it's expensive and still too complicated for most people.
Furtka aims to be: Homebase aims to be:
- **As easy to install as Windows** — boot from USB, click through a wizard, done - **As easy to install as Windows** — boot from USB, click through a wizard, done
- **As easy to use as an app store** — want Nextcloud? Click install, pick a name, wait a few minutes, and you have `nextcloud.yourdomain.de` - **As easy to use as an app store** — want Nextcloud? Click install, pick a name, wait a few minutes, and you have `nextcloud.yourdomain.de`
@ -52,7 +52,7 @@ Furtka aims to be:
| Installation | Web-based wizard | Robert's webapp prototype (device reader + form → JSON) is working. Full spec: [wizard-flow.md](docs/wizard-flow.md) | | Installation | Web-based wizard | Robert's webapp prototype (device reader + form → JSON) is working. Full spec: [wizard-flow.md](docs/wizard-flow.md) |
| Reverse proxy | Caddy | Automatic Let's Encrypt, simplest config of any reverse proxy | | Reverse proxy | Caddy | Automatic Let's Encrypt, simplest config of any reverse proxy |
| Identity provider | Authentik | Bundled SSO from day one — every app template auto-wires to it at install | | Identity provider | Authentik | Bundled SSO from day one — every app template auto-wires to it at install |
| Managed gateway DNS | NS delegation to `ns1.furtka.org` | User delegates once at registrar; we handle wildcard cert + subdomain creation | | Managed gateway DNS | NS delegation to `ns1.homebase.cloud` | User delegates once at registrar; we handle wildcard cert + subdomain creation |
| Local HTTPS | Local CA | One-click CA install → green padlock on every service, no browser warnings | | Local HTTPS | Local CA | One-click CA install → green padlock on every service, no browser warnings |
| Gateway | Flexible | Own reverse proxy OR managed through our infrastructure | | Gateway | Flexible | Own reverse proxy OR managed through our infrastructure |
| UI approach | UI-first | Design the simplest possible UI, then build everything to match | | UI approach | UI-first | Design the simplest possible UI, then build everything to match |
@ -103,33 +103,17 @@ None of these nail the "your dad can set this up" experience. The installer wiza
- [x] Arch running on Proxmox, custom image builds in progress (Robert) - [x] Arch running on Proxmox, custom image builds in progress (Robert)
- [x] Competitor analysis — see [docs/competitors.md](docs/competitors.md) - [x] Competitor analysis — see [docs/competitors.md](docs/competitors.md)
- [x] Wizard flow spec — see [docs/wizard-flow.md](docs/wizard-flow.md) - [x] Wizard flow spec — see [docs/wizard-flow.md](docs/wizard-flow.md)
- [x] Release process + CI — CalVer tags, conventional commits, Forgejo Actions (ruff, pytest, JSON, link checks), `26.0-alpha` tagged - [ ] **Base OS bootable image** — Robert gets a minimal Arch image that boots, runs Docker, serves the installer webapp at `https://proksi.local` *(next blocker)*
- [x] Forgejo runner live on Proxmox VM (`forge-runner-01`, Ubuntu 24.04) — docker-outside-of-docker with host-mode jobs for ISO builds, setup captured in [docs/runner-setup.md](docs/runner-setup.md) + [ops/forgejo-runner/](ops/forgejo-runner/) - [ ] Installer wizard screens S5–S8 (domain, ssl, diagnostic, confirm)
- [x] **ISO-build in CI**`.forgejo/workflows/build-iso.yml` runs `iso/build.sh` on every push to `main` and publishes the resulting `.iso` as the `furtka-iso` artifact (14 d retention). Push → green run → download → test.
- [x] **Forgejo Releases + tag-driven release pipeline**`.forgejo/workflows/release.yml` fires on `[0-9]*` tags, `scripts/build-release-tarball.sh` packages `furtka/` + `apps/` + `assets/` + a root VERSION, `scripts/publish-release.sh` uploads tarball + sha256 + release.json to the Forgejo releases page. Releases `26.1-alpha`, `26.3-alpha`, and `26.4-alpha` live at [releases](https://forgejo.sourcegate.online/daniel/furtka/releases) (26.2 stalled on a `jq` apt hang, fixed in 26.3). Needs one repo secret (`FORGEJO_RELEASE_TOKEN`).
- [x] **Walking-skeleton live ISO — end to end**`iso/build.sh` produces a hybrid BIOS/UEFI Arch-based ISO. It boots in a Proxmox VM, DHCPs onto the LAN, shows a console welcome with `http://proksi.local:5000` (+ IP fallback), serves the Flask webinstaller, runs `archinstall --silent`, reboots the VM via a Reboot-now button, and the installed system logs in and runs `docker ps` without sudo. Build infra in [`iso/`](iso/).
- [x] **Drop loop/rom devices from drive list**`webinstaller/drives.py` filters by `lsblk` `TYPE=disk`, so the live squashfs and CD-ROM no longer appear as install targets. The boot USB itself is also filtered: on the live ISO, `findmnt /run/archiso/bootmnt` resolves the boot partition and its parent disk is dropped from the picker.
- [x] **Rebrand GRUB menu**`iso/build.sh` rewrites "Arch Linux install medium" → "Furtka Live Installer" across GRUB, syslinux, and systemd-boot configs; default entry marked `(Recommended)`.
- [x] **Wizard: account form → drive picker → overview → archinstall** — S1 collects hostname/user/password/language with validation, S2 picks boot drive, overview confirms, `/install/run` writes `user_configuration.json` + `user_credentials.json` (0600) and execs `archinstall --silent` against its 4.x schema (`default_layout` disk_config + `!root-password` / `!password` sentinel keys + `custom_commands` for post-install group joins). Install log page polls a JSON endpoint and renders a phase-based progress bar with a collapsible raw log. `FURTKA_DRY_RUN=1` skips the real exec for testing.
- [x] **mDNS `proksi.local`** — hostname baked into the live ISO, avahi + nss-mdns in the package list, advertised as soon as network-online fires. The HTTPS + local-CA half of this milestone is still open below.
- [x] **Base OS post-install (demo level)** — after reboot the installed system comes up with Caddy on `:80` serving a Furtka landing page (welcome + live uptime/Docker/disk tiles), the console shows a banner pointing at `http://<hostname>.local`, and `nss-mdns` makes that URL resolve on the LAN. Written by `webinstaller/app.py`'s `_post_install_commands` via archinstall's `custom_commands`.
- [x] **Resource manager + first bundled app (`fileshare`/SMB)**`furtka/` Python package handles scan / install / remove / reinstall of apps shipped under `apps/`. Manifest schema with settings fields drives an in-browser config form (no SSH needed). First app is a `dperson/samba` share mountable from Mac/Win/Linux. Validated end-to-end on VM 2026-04-16.
- [x] **On-box web UI uplevel** — shared `/style.css` served by Caddy, persistent top nav, landing page with an "Your apps" tile grid + live status, `/apps` with real per-app icons (inlined SVG from each manifest), new `/settings` page (hostname, IP, version, kernel, RAM, Docker, uptime + Furtka-updates card). `prefers-color-scheme` light/dark.
- [x] **Versioned on-box layout + Phase 1 per-app updates**`/opt/furtka/versions/<ver>/` + `current` symlink; `/var/lib/furtka/` for runtime state. `POST /api/apps/<name>/update` runs `docker compose pull` + compares digests + conditional `up -d`.
- [x] **Phase 2 Furtka self-update** — `/settings` → Check → Update now. Downloads signed tarball (SHA256), stages, atomic symlink flip, reloads Caddy, daemon-reload, restarts services, health-checks the new api with auto-rollback on failure. CLI: `furtka update [--check]` + `furtka rollback`. Validated end-to-end on VM 2026-04-16 (`26.0-alpha` → `26.3-alpha` → rollback → reboot).
- [x] **Local HTTPS Phase 1** — Caddy `tls internal` on `:443` is fully opt-in via the `/settings` toggle (26.15-alpha); fresh installs stay HTTP-only so a half-trusted cert chain can't lock the user out. Per-box root CA generated on first enable, `rootCA.crt` downloadable from `/settings`, per-OS install guide at `/https-install/`. The "force HTTPS" sub-toggle still only appears once the current browser already trusts the cert.
- [x] **Post-build smoke VM on Proxmox** — `.forgejo/workflows/build-iso.yml` hands the freshly built ISO to `scripts/smoke-vm.sh`, which boots it in a throwaway VM on `pollux` (192.168.178.165) and curls the webinstaller on `:5000`. VMID range 9000–9099, last 5 kept. Green end-to-end since 26.4-alpha.
- [ ] Installer wizard screens S3–S7 — per-device purpose, network, domain, SSL, diagnostic. S5/S6 blocked on managed-gateway DNS infra not yet built.
- [ ] Local HTTPS Phase 2 — dedicated local CA (not Caddy's `tls internal`), streamlined one-click install across Win/Mac/Linux/Android, and HTTPS on the live-installer wizard (`https://proksi.local:5000`).
- [ ] Caddy + Authentik wired into first-boot bootstrap - [ ] Caddy + Authentik wired into first-boot bootstrap
- [ ] Managed gateway infrastructure — `ns1/ns2.furtka.org` + DNS-01 wildcard automation - [ ] Managed gateway infrastructure — `ns1/ns2.homebase.cloud` + DNS-01 wildcard automation
- [ ] First containerized service (Nextcloud?) with auto-SSO + auto-subdomain - [ ] First containerized service (Nextcloud?) with auto-SSO + auto-subdomain
- [ ] Competitor hands-on testing on Proxmox — validate findings from docs/competitors.md - [ ] Competitor hands-on testing on Proxmox — validate findings from docs/competitors.md
- [ ] UI mockups / drafts (Robert) - [ ] UI mockups / drafts (Robert)
## Business Model ## Business Model
Furtka starts as a private/personal project. The long-term model follows Proxmox: Homebase starts as a private/personal project. The long-term model follows Proxmox:
- **Free & open source** — anyone can download, install, and use it - **Free & open source** — anyone can download, install, and use it
- **Paid support & managed infrastructure** — for users who want hassle-free setup - **Paid support & managed infrastructure** — for users who want hassle-free setup

View file

@ -1,6 +1,6 @@
# Releasing # Releasing
Furtka uses calendar versioning: **`YY.N-stage`** — e.g. `26.0-alpha` is 2026, release 0, alpha stage. No `v` prefix. Homebase uses calendar versioning: **`YY.N-stage`** — e.g. `26.0-alpha` is 2026, release 0, alpha stage. No `v` prefix.
- `YY` — last two digits of the current year - `YY` — last two digits of the current year
- `N` — incrementing release number within the year, starting at 0 (next one in 2026 is `26.1-alpha`, then `26.2-alpha`…) - `N` — incrementing release number within the year, starting at 0 (next one in 2026 is `26.1-alpha`, then `26.2-alpha`…)
@ -24,7 +24,7 @@ Tag per meaningful milestone, not on a calendar. A milestone is: ISO boots, a wi
``` ```
Add a `[26.1-alpha]` link definition at the bottom: Add a `[26.1-alpha]` link definition at the bottom:
```markdown ```markdown
[26.1-alpha]: https://forgejo.sourcegate.online/daniel/furtka/releases/tag/26.1-alpha [26.1-alpha]: https://forgejo.sourcegate.online/daniel/homebase/releases/tag/26.1-alpha
``` ```
Update the `[Unreleased]` compare link to point at the new tag. Update the `[Unreleased]` compare link to point at the new tag.
@ -45,14 +45,13 @@ Tag per meaningful milestone, not on a calendar. A milestone is: ISO boots, a wi
git push origin 26.1-alpha git push origin 26.1-alpha
``` ```
5. **The release workflow does the rest.** `.forgejo/workflows/release.yml` fires on the tag push and runs on the self-hosted runner: `scripts/build-release-tarball.sh` builds the self-update payload (tarball + sha256 + release.json under `dist/`), `iso/build.sh` builds the live-installer ISO, `scripts/publish-release.sh` uploads tarball + sha256 + release.json + ISO to the Forgejo release page. Pre-release is flagged automatically based on the suffix (`-alpha`/`-beta`/`-rc`). ISO build is `continue-on-error`: a flaky ISO step doesn't block the core tarball (the thing boxes need for self-update). 5. **Create a Forgejo Release** at `https://forgejo.sourcegate.online/daniel/homebase/releases/new`:
- Tag: `26.1-alpha` (already exists)
- Title: `26.1-alpha`
- Body: paste the changelog section for this version
- Tick **Pre-release** for anything still `-alpha` or `-beta`
The release workflow needs one secret set at repo **Settings → Secrets → Actions**: 6. **Verify CI passed on the tag.** The Forgejo Actions run against the tagged commit should be green before you announce the release anywhere.
- `FORGEJO_RELEASE_TOKEN` — a PAT with `write:repository` scope.
6. **Verify CI passed on the tag.** The Forgejo Actions run against the tagged commit should be green before you announce the release anywhere — both the CI workflow (lint/test) and the Release workflow (tarball published).
7. **(Optional) Dogfood the update path.** On a VM running the previous version, `sudo furtka update --check` should now see the new tag, and `sudo furtka update` applies it without a reinstall.
## First-time: find the current version ## First-time: find the current version

View file

@ -1,145 +0,0 @@
# Building a Furtka app from a Docker image
A Furtka app is a folder with four files. The reconciler walks `/var/lib/furtka/apps/*` at boot, validates each manifest, ensures the declared volumes exist, and runs `docker compose up -d` per app. Filesystem is the only source of truth — no database.
Use `apps/fileshare/` as the reference implementation.
## Folder layout
```
apps/<name>/
manifest.json # required — app metadata and user-facing settings
docker-compose.yaml # required — filename is .yaml, not .yml
.env.example # required — keys consumed by docker-compose, with safe defaults
icon.svg # required — referenced by manifest.icon
```
The folder name must equal `manifest.name`. The scanner rejects mismatches.
## `manifest.json`
All top-level fields except `description_long` and `settings` are required.
```json
{
"name": "myapp",
"display_name": "My App",
"version": "0.1.0",
"description": "One-line summary shown in the app list.",
"description_long": "Longer German prose shown on the app page. Optional.",
"volumes": ["data"],
"ports": [8080],
"icon": "icon.svg",
"settings": [
{
"name": "ADMIN_PASSWORD",
"label": "Passwort",
"description": "Wird beim ersten Start gesetzt.",
"type": "password",
"required": true
}
]
}
```
Rules enforced by `furtka/manifest.py`:
- `volumes` — short names, strings. Namespaced to `furtka_<app>_<short>` at runtime.
- `ports` — integers. Informational only; compose owns the actual port binding.
- `settings[].name` — must match `^[A-Z_][A-Z0-9_]*$`. This name becomes both the env-var key and the form-field ID.
- `settings[].type` — one of `text`, `password`, `number`, `path`.
- `settings[].required` — if true, the install refuses when the value is empty.
- `settings[].default` — optional string. Used to pre-fill the form and the bootstrapped `.env`.
### Path-type settings (host bind mounts)
Use `"type": "path"` when the app should point at an existing folder on the host — media libraries, document archives, photo backups. The value is written to `.env` like any other setting, and compose consumes it via `${VAR}` substitution as a bind mount.
```json
{
"name": "MEDIA_PATH",
"label": "Medienordner",
"description": "Absoluter Pfad zu deinem Medien-Ordner, z.B. /mnt/media.",
"type": "path",
"required": true
}
```
```yaml
services:
app:
volumes:
- ${MEDIA_PATH}:/media:ro
```
The installer (`install_from` and `update_env`) refuses values that:
- aren't absolute (must start with `/`),
- don't exist on the host,
- aren't directories,
- resolve (after `Path.resolve()`) into a system-path deny-list: `/`, `/etc`, `/root`, `/boot`, `/proc`, `/sys`, `/dev`, `/bin`, `/sbin`, `/usr/bin`, `/usr/sbin`, `/var/lib/furtka`.
Traversal like `/mnt/../etc` is caught too — the deny-list check runs on the resolved path.
Path settings sit alongside manifest-declared volumes. Use `manifest.volumes` for internal state the app owns (databases, caches, config), and path settings for user data the container should mount and — usually — read without owning. Mounting read-only (`:ro`) is a good default for data the app only consumes.
## `docker-compose.yaml`
- File extension is `.yaml`. The compose runner hardcodes this — `.yml` will not be found.
- Reference manifest volumes as `furtka_<app>_<short>` with `external: true`. The reconciler creates the volume *before* `compose up`, so compose must not try to manage its lifecycle.
- Values from `.env` are substituted by compose in the usual `${VAR}` form.
- If the upstream image ships a HEALTHCHECK that misbehaves on Furtka's setup, disable it — a permanently-unhealthy container scares users reading `docker ps`.
- Pin images to a digest or stable tag when you can. `:latest` is acceptable for an MVP but noisy.
Minimal example:
```yaml
services:
app:
image: ghcr.io/example/myapp:1.2.3
restart: unless-stopped
environment:
- ADMIN_PASSWORD=${ADMIN_PASSWORD}
ports:
- "8080:8080"
volumes:
- furtka_myapp_data:/var/lib/myapp
volumes:
furtka_myapp_data:
external: true
```
## `.env.example`
One `KEY=VALUE` per line. Every key declared in `manifest.settings` should have a line here so the compose file resolves cleanly on first install even before the user opens the form.
Do not use `changeme` (or any value listed in `furtka.installer.PLACEHOLDER_SECRETS`) as the default for a required secret. The install step scans the final `.env` and refuses to finish if a placeholder survives — this is the guardrail that stops us shipping an app with a known password.
For non-secret values (usernames, paths), sensible defaults are fine and go straight into `.env` on first install.
## `icon.svg`
- 64×64 viewBox, no width/height attributes so the UI can scale it.
- Use `fill="currentColor"` (and `stroke="currentColor"`) so the icon picks up the current theme instead of baking in a color.
- Keep it single-path-ish. These render small in the app grid.
- The icon is inlined into the `/apps` page by the defensive SVG sanitiser, which strips `<script>`, `on*` attributes, and `javascript:` refs and enforces a 16 KB cap. Anything fancier than static paths and shapes will be rejected.
## Install and test
From the repo root on a dev box with Furtka installed:
```
sudo furtka app install ./apps/myapp
```
`furtka app install` runs a reconcile as its last step, so the container is up once the command returns. Open the Web UI (`http://furtka.local/`), fill in the settings form, and confirm the app starts. `docker ps` should show one container per compose service; `docker volume ls` should show `furtka_myapp_*`.
To bundle the app into the ISO, drop the folder into `apps/` before `iso/build.sh` runs — the build tarballs the whole `apps/` tree into the image.
## Out of scope (for now)
- Sharing volumes between apps. v1 keeps them isolated.
- Auth on the Web UI. The UI itself has a banner about this.
- Automatic updates. User-triggered per-app update is `POST /api/apps/<name>/update`.
- A network catalog. `furtka app install <name>` only resolves bundled apps in `/opt/furtka/apps/`.

View file

@ -1,2 +0,0 @@
SMB_USER=furtka
SMB_PASSWORD=changeme

View file

@ -1,39 +0,0 @@
# Furtka fileshare — SMB share via dperson/samba.
#
# The volume `furtka_fileshare_files` is created by the Furtka reconciler
# from the manifest's "volumes" list before this compose file is brought up;
# it's referenced as `external: true` here so docker compose doesn't try
# to manage its lifecycle.
#
# TODO(image-pin): `:latest` is shaky for production — pin to a digest
# (`dperson/samba@sha256:...`) or a stable tag once we've verified one
# against the upstream registry. For the MVP run we accept the drift
# risk to keep the install reproducible against whatever the upstream
# image happens to be on test day; revisit before any non-developer
# touches this.
services:
smbd:
image: dperson/samba:latest
restart: unless-stopped
network_mode: host
# The upstream image's HEALTHCHECK times out under normal operation on
# our setup (2026-04-15 VM test — all 6 probes failed while the share
# was reachable from clients). Disable to avoid a permanently-"unhealthy"
# container that scares users reading `docker ps`.
healthcheck:
disable: true
environment:
- USERID=1000
- GROUPID=1000
- TZ=Europe/Berlin
command: >
-u "${SMB_USER};${SMB_PASSWORD}"
-s "files;/mount;yes;no;no;${SMB_USER}"
-p
volumes:
- furtka_fileshare_files:/mount
volumes:
furtka_fileshare_files:
external: true

View file

@ -1,9 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64" aria-hidden="true">
<path fill="currentColor" opacity="0.28" d="M6 18 Q6 14 10 14 H22 L28 20 H54 Q58 20 58 24 V46 Q58 50 54 50 H10 Q6 50 6 46 Z"/>
<path fill="currentColor" d="M6 28 Q6 24 10 24 H54 Q58 24 58 28 V46 Q58 50 54 50 H10 Q6 50 6 46 Z"/>
<g fill="none" stroke="currentColor" stroke-width="2.5" stroke-linecap="round">
<path d="M42 12 A10 10 0 0 1 58 22"/>
<path d="M46 14 A6 6 0 0 1 56 22" opacity="0.75"/>
</g>
<circle cx="51" cy="19" r="1.8" fill="currentColor"/>
</svg>

Before

Width:  |  Height:  |  Size: 561 B

View file

@ -1,28 +0,0 @@
{
"name": "fileshare",
"display_name": "Network Files",
"version": "0.1.2",
"description": "SMB share for Mac, Windows, Linux and Android devices on the LAN.",
"description_long": "Alle Geräte im WLAN sehen einen gemeinsamen Ordner. Funktioniert mit Windows, Mac, Linux und Android. Verbinden zu smb://furtka.local — Anmeldung mit dem hier gesetzten Benutzernamen und Passwort.",
"volumes": ["files"],
"ports": [445, 139],
"icon": "icon.svg",
"open_url": "smb://{host}/files",
"settings": [
{
"name": "SMB_USER",
"label": "Benutzername",
"description": "Der Name, mit dem sich Geräte am Share anmelden.",
"type": "text",
"default": "furtka",
"required": true
},
{
"name": "SMB_PASSWORD",
"label": "Passwort",
"description": "Mindestens 8 Zeichen. Wird nie angezeigt — auch dir nicht.",
"type": "password",
"required": true
}
]
}

View file

@ -18,12 +18,10 @@
"packages": [ "packages": [
"docker", "docker",
"docker-compose", "docker-compose",
"nano",
"vim", "vim",
"git", "git",
"htop", "htop",
"curl", "curl"
"openssh"
], ],
"profile": { "profile": {
@ -31,8 +29,7 @@
}, },
"services": [ "services": [
"docker", "docker"
"sshd"
], ],
"network_config": { "network_config": {

View file

@ -1,102 +0,0 @@
# Serves the Furtka landing page + live JSON on :80 (plain HTTP). HTTPS
# is **opt-in** — Caddy doesn't serve :443 until the user clicks the
# "Enable HTTPS" toggle on /settings, which drops an import snippet into
# /etc/caddy/furtka-https.d/. Default install has NO tls site block →
# Caddy never generates a self-signed CA / leaf cert → no
# SEC_ERROR_BAD_SIGNATURE when a user visits https://furtka.local before
# they've trusted anything. That was the 26.14-era regression this file
# exists to cure: the old Caddyfile always served :443 with a freshly-
# generated cert, and a browser that had ever trusted an older Furtka
# box's CA would reject the new one with an unbypassable bad-sig error.
#
# /apps, /api, /login, /logout, / (home), /settings are reverse-proxied
# to the resource-manager API (furtka serve, bound to 127.0.0.1:7000).
# Static pages are read from /opt/furtka/current/ — updates flip the
# symlink and everything picks up the new content without a Caddy
# restart (a `systemctl reload caddy` is still triggered post-swap to
# flush the file-server's handle cache).
#
# Two snippet dirs, both silently no-op when empty:
# - /etc/caddy/furtka.d/*.caddyfile → imported inside the :80 block.
# The HTTPS toggle's "force HTTP→HTTPS redirect" snippet lands here.
# - /etc/caddy/furtka-https.d/*.caddyfile → imported at TOP LEVEL, so
# the HTTPS hostname+tls-internal site block can drop in here when
# the toggle is on. Hostname is substituted at toggle-time.
{
# Named-hostname :443 blocks would otherwise make Caddy add its own
# HTTP→HTTPS redirect — but we already serve our own `:80` block and
# the opt-in /settings toggle owns the redirect. Disable the built-in
# to keep a single source of truth.
auto_https disable_redirects
}
(furtka_routes) {
handle /api/* {
reverse_proxy localhost:7000
}
handle /apps* {
reverse_proxy localhost:7000
}
handle /login* {
reverse_proxy localhost:7000
}
handle /logout* {
reverse_proxy localhost:7000
}
# /settings and / — these previously served as static HTML straight
# from the catch-all file_server, which meant the auth-guard was
# bypassed: a LAN visitor could see the box's version, IP, and
# reach the Update-now / Reboot buttons (the API calls behind them
# are auth-gated, but the page itself rendered without a redirect
# to /login). Route them through the Python handler which checks
# the session cookie and either serves the static HTML from
# assets/www/ or redirects to /login.
handle /settings* {
reverse_proxy localhost:7000
}
handle / {
reverse_proxy localhost:7000
}
# Runtime JSON lives under /var/lib/furtka/ so it survives self-updates
# (which only swap /opt/furtka/current).
handle /status.json {
root * /var/lib/furtka
file_server
}
handle /furtka.json {
root * /var/lib/furtka
file_server
}
handle /update-state.json {
root * /var/lib/furtka
file_server
}
# Download the local root CA cert Caddy generated for `tls internal`.
# Public because users need to grab it before they've trusted it.
# The private key next to it stays 0600 / caddy-owned.
handle /rootCA.crt {
root * /var/lib/caddy/pki/authorities/local
rewrite * /root.crt
file_server
header Content-Type "application/x-x509-ca-cert"
header Content-Disposition "attachment; filename=furtka-local-rootCA.crt"
}
handle {
root * /opt/furtka/current/assets/www
file_server
encode gzip
}
log {
output stdout
}
}
# HTTPS opt-in: when /settings toggles HTTPS on, a snippet gets written
# into /etc/caddy/furtka-https.d/ that adds the hostname+tls-internal
# site block. Empty directory = HTTP-only (default fresh install).
import /etc/caddy/furtka-https.d/*.caddyfile
:80 {
import /etc/caddy/furtka.d/*.caddyfile
import furtka_routes
}

View file

@ -1,40 +0,0 @@
#!/bin/bash
# Writes /var/lib/furtka/status.json with current system stats. Fired by
# furtka-status.timer every 30s; also runs once 10s after boot. Path is under
# /var/lib/ so self-updates (which swap /opt/furtka/current) don't clobber it.
set -e
out=/var/lib/furtka/status.json
tmp=$(mktemp)
mkdir -p /var/lib/furtka
hostname=$(cat /etc/hostname)
uptime=$(uptime -p 2>/dev/null | sed 's/^up //' || echo unknown)
if command -v docker >/dev/null 2>&1; then
docker_version=$(docker --version 2>/dev/null | awk '{print $3}' | tr -d ',' || echo unavailable)
else
docker_version=unavailable
fi
disk_free=$(df -h / 2>/dev/null | awk 'NR==2 {print $4 " free of " $2}' || echo unknown)
ip_primary=$(ip -4 -o addr show scope global 2>/dev/null | awk '{print $4}' | cut -d/ -f1 | head -1 || true)
kernel=$(uname -r 2>/dev/null || echo unknown)
ram_total=$(free -h --si 2>/dev/null | awk '/^Mem:/ {print $2}' || echo unknown)
furtka_version=$(cat /opt/furtka/current/VERSION 2>/dev/null || echo dev)
updated_at=$(date -Iseconds)
cat > "$tmp" <<EOF
{
"hostname": "$hostname",
"uptime": "$uptime",
"docker_version": "$docker_version",
"disk_free": "$disk_free",
"ip_primary": "$ip_primary",
"kernel": "$kernel",
"ram_total": "$ram_total",
"furtka_version": "$furtka_version",
"updated_at": "$updated_at"
}
EOF
mv "$tmp" "$out"
chmod 644 "$out"

View file

@ -1,22 +0,0 @@
#!/bin/bash
# Regenerates /etc/issue on the installed system so the console tells the
# user which URL to open. Mirrors the live-ISO furtka-update-issue pattern.
set -e
hostname=$(cat /etc/hostname)
ip=$(ip -4 -o addr show scope global 2>/dev/null | awk '{print $4}' | cut -d/ -f1 | head -1)
{
echo
echo " Furtka is ready."
echo
echo " Open in a browser on another device on your network:"
echo
echo " http://${hostname}.local (easy — try this first)"
if [ -n "$ip" ]; then
echo " http://${ip} (fallback if the first doesn't work)"
fi
echo
} > /etc/issue
agetty --reload 2>/dev/null || true

View file

@ -1,14 +0,0 @@
[Unit]
Description=Furtka resource-manager HTTP API + UI
Requires=docker.service
After=docker.service network-online.target furtka-reconcile.service
Wants=network-online.target
[Service]
Type=simple
ExecStart=/usr/local/bin/furtka serve --host 127.0.0.1 --port 7000
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target

View file

@ -1,12 +0,0 @@
[Unit]
Description=Furtka apps catalog sync
Requires=network-online.target
After=network-online.target
[Service]
Type=oneshot
ExecStart=/usr/local/bin/furtka catalog sync
TimeoutStartSec=5min
[Install]
WantedBy=multi-user.target

View file

@ -1,14 +0,0 @@
[Unit]
Description=Furtka apps catalog daily sync
[Timer]
# First sync 10 min after boot, then once per day with up to 6 h jitter so
# a fleet of boxes doesn't all hit Forgejo at the same second. Persistent
# = catch up if the box was off when the timer should have fired.
OnBootSec=10min
OnUnitActiveSec=24h
RandomizedDelaySec=6h
Persistent=true
[Install]
WantedBy=timers.target

View file

@ -1,13 +0,0 @@
[Unit]
Description=Furtka app reconciler (boot-scan)
Requires=docker.service
After=docker.service network-online.target
Wants=network-online.target
[Service]
Type=oneshot
ExecStart=/usr/local/bin/furtka reconcile
RemainAfterExit=no
[Install]
WantedBy=multi-user.target

View file

@ -1,7 +0,0 @@
[Unit]
Description=Refresh Furtka system status JSON
After=network-online.target
[Service]
Type=oneshot
ExecStart=/opt/furtka/current/assets/bin/furtka-status

View file

@ -1,10 +0,0 @@
[Unit]
Description=Refresh Furtka system status every 30s
[Timer]
OnBootSec=10s
OnUnitActiveSec=30s
AccuracySec=5s
[Install]
WantedBy=timers.target

View file

@ -1,12 +0,0 @@
[Unit]
Description=Furtka console welcome banner
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
ExecStart=/opt/furtka/current/assets/bin/furtka-welcome
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target

View file

@ -1,159 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Install local HTTPS · Furtka</title>
<meta name="viewport" content="width=device-width,initial-scale=1">
<link rel="stylesheet" href="/style.css">
</head>
<body>
<main class="wrap">
<nav class="nav">
<a class="brand" href="/">Furtka</a>
<div class="nav-links">
<a href="/">Home</a>
<a href="/apps">Apps</a>
<a href="/settings/" aria-current="page">Settings</a>
</div>
</nav>
<h1>Install local HTTPS</h1>
<p class="lede">
Trust the Furtka root CA on your device, then reach this box at
<code>https://<span id="hostname"></span>/</code> with a green padlock.
HTTP stays available until you enable the redirect in
<a class="inline-link" href="/settings/">Settings</a>.
</p>
<section>
<h2>Download the CA</h2>
<div class="card">
<dl class="kv">
<dt>Fingerprint (SHA-256)</dt><dd id="fingerprint"></dd>
</dl>
<p class="hint">
Check this fingerprint matches what <code>/settings</code> shows before
trusting it on another device. The root CA is unique to this box.
</p>
<div class="update-actions">
<button id="download-btn" class="secondary">Download rootCA.crt</button>
</div>
</div>
</section>
<section>
<h2>Linux (system-wide)</h2>
<div class="card">
<p class="hint">Arch / Fedora / RHEL:</p>
<pre>sudo cp rootCA.crt /etc/ca-certificates/trust-source/anchors/furtka-local.crt
sudo update-ca-trust</pre>
<p class="hint">Debian / Ubuntu:</p>
<pre>sudo cp rootCA.crt /usr/local/share/ca-certificates/furtka-local.crt
sudo update-ca-certificates</pre>
<p class="hint">
Firefox keeps its own certificate store. After the above, open
<code>about:preferences#privacy</code><em>View Certificates</em>
<em>Authorities</em><em>Import</em>, pick <code>rootCA.crt</code>,
tick <em>Trust this CA to identify websites</em>.
</p>
</div>
</section>
<section>
<h2>macOS</h2>
<div class="card">
<ol>
<li>Double-click <code>rootCA.crt</code>. Keychain Access opens.</li>
<li>When prompted, add it to the <strong>System</strong> keychain.</li>
<li>Find the <em>Furtka</em> entry, double-click, expand <em>Trust</em>,
set <em>When using this certificate</em> to <strong>Always Trust</strong>.</li>
<li>Close the window — you will be asked for your password.</li>
</ol>
</div>
</section>
<section>
<h2>Windows</h2>
<div class="card">
<ol>
<li>Double-click <code>rootCA.crt</code>.</li>
<li>Click <strong>Install Certificate</strong>.</li>
<li>Choose <strong>Local Machine</strong> (requires admin) and click <em>Next</em>.</li>
<li>Select <strong>Place all certificates in the following store</strong>
<em>Browse</em><strong>Trusted Root Certification Authorities</strong>.</li>
<li>Finish. Chrome and Edge pick this up immediately. Firefox keeps its
own store — import the same file via Firefox settings.</li>
</ol>
</div>
</section>
<section>
<h2>Android</h2>
<div class="card">
<ol>
<li>Transfer <code>rootCA.crt</code> to the device (AirDrop, email,
USB — whatever is handy).</li>
<li>Settings → <em>Security</em> (or <em>Security &amp; privacy</em>)
<em>More security settings</em><em>Encryption &amp; credentials</em>
<em>Install a certificate</em><strong>CA certificate</strong>.</li>
<li>Confirm the warning, then pick the file.</li>
</ol>
<p class="hint">
Android 11+ only trusts user-installed CAs for browsers by default.
Some apps (banking, Play services) ignore them. Not a Furtka bug —
an Android policy choice.
</p>
</div>
</section>
<section>
<h2>iOS &amp; iPadOS</h2>
<div class="card">
<p class="hint">
Honest warning: iOS needs a signed configuration profile for a
properly trusted CA. What works today:
</p>
<ol>
<li>Email <code>rootCA.crt</code> to yourself and open the attachment
in Mail. iOS prompts to install a profile.</li>
<li>Settings → <em>General</em><em>VPN &amp; Device Management</em>
→ tap the Furtka profile → <strong>Install</strong>.</li>
<li>Settings → <em>General</em><em>About</em><em>Certificate
Trust Settings</em> → toggle <strong>Furtka</strong> on.</li>
</ol>
<p class="hint">
A packaged <code>.mobileconfig</code> makes this smoother; it's on
the roadmap but not in this release.
</p>
</div>
</section>
<footer>
<p>Furtka · <a href="https://furtka.org">furtka.org</a></p>
</footer>
</main>
<script>
document.getElementById('hostname').textContent = location.hostname;
document.getElementById('download-btn').addEventListener('click', () => {
const a = document.createElement('a');
a.href = '/rootCA.crt';
a.download = 'furtka-local-rootCA.crt';
document.body.appendChild(a);
a.click();
a.remove();
});
(async () => {
try {
const r = await fetch('/api/furtka/https/status', { cache: 'no-store' });
if (!r.ok) return;
const s = await r.json();
document.getElementById('fingerprint').textContent =
s.fingerprint_sha256 || 'waiting for Caddy…';
} catch (e) { /* keep the placeholder */ }
})();
</script>
</body>
</html>

View file

@ -1,171 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Furtka</title>
<meta name="viewport" content="width=device-width,initial-scale=1">
<link rel="stylesheet" href="/style.css">
</head>
<body>
<main class="wrap">
<nav class="nav">
<a class="brand" href="/">Furtka</a>
<div class="nav-links">
<a href="/" aria-current="page">Home</a>
<a href="/apps">Apps</a>
<a href="/settings/">Settings</a>
<a href="#" id="logout-link" onclick="return doLogout(event)">Logout</a>
</div>
</nav>
<header>
<h1>Welcome to Furtka</h1>
<p class="lead">Your home server is ready.</p>
<p class="host">Running on <code id="hostname"></code></p>
</header>
<section>
<h2>Your apps</h2>
<div id="apps-section" class="grid-apps">
<a class="app-tile" href="/apps"><span class="name">Loading…</span></a>
</div>
</section>
<section>
<h2>System status</h2>
<div class="tiles">
<div class="tile">
<span class="label">Uptime</span>
<span class="value" id="uptime"></span>
</div>
<div class="tile">
<span class="label">Docker</span>
<span class="value" id="docker"></span>
</div>
<div class="tile">
<span class="label">Free disk</span>
<span class="value" id="disk"></span>
</div>
</div>
<p class="updated">Updated <span id="updated"></span></p>
</section>
<section>
<h2>Coming next</h2>
<div class="coming">
<p class="hint">Features we're building — follow progress on <a href="https://furtka.org">furtka.org</a>.</p>
<a href="https://furtka.org/#planned">Photos</a>
<a href="https://furtka.org/#planned">Smart home</a>
<a href="https://furtka.org/#planned">Media streaming</a>
<a href="https://furtka.org/#planned">Multiple boxes</a>
<a href="https://furtka.org/#planned">Secure link</a>
<a href="https://furtka.org/#planned">User accounts</a>
</div>
</section>
<footer>
<p>Furtka · <a href="https://furtka.org">furtka.org</a></p>
</footer>
</main>
<script>
// Revoke the cookie server-side and bounce to /login. Shared
// shape with the _HTML in furtka/api.py so the two logout
// links behave identically.
async function doLogout(ev) {
ev.preventDefault();
try { await fetch('/logout', { method: 'POST', credentials: 'same-origin' }); }
catch (e) { /* server may already be down */ }
window.location.href = '/login';
return false;
}
// Hostname + install metadata — written once at install time to
// /var/lib/furtka/furtka.json (see _furtka_json_cmd in the installer).
// Separate from status.json because these facts don't change between
// refresh ticks.
let HOSTNAME = "";
const FALLBACK_ICON = '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"><path d="M3 7v12a2 2 0 0 0 2 2h14a2 2 0 0 0 2-2V9a2 2 0 0 0-2-2h-7l-2-2H5a2 2 0 0 0-2 2z"/></svg>';
function esc(s) {
const d = document.createElement('div');
d.textContent = s == null ? '' : String(s);
return d.innerHTML;
}
async function loadFurtkaJson() {
try {
const r = await fetch('/furtka.json', { cache: 'no-store' });
if (!r.ok) return;
const f = await r.json();
HOSTNAME = f.hostname || "";
const el = document.getElementById('hostname');
if (el) el.textContent = HOSTNAME || '—';
} catch (e) { /* no-op */ }
}
function primaryAction(app) {
// open_url is a manifest-declared template with a `{host}`
// placeholder — substituted against the current browser's
// hostname so smb://host/files and http://host:3001/ both
// follow however the user reached Furtka (furtka.local, raw
// IP, a future reverse-proxy hostname). Apps without a
// frontend fall back to /apps for management.
if (app.open_url) {
const host = HOSTNAME || location.hostname;
return { href: app.open_url.replace('{host}', host), label: 'Open', external: true };
}
return { href: '/apps', label: 'Manage →', external: false };
}
async function renderApps() {
const target = document.getElementById('apps-section');
try {
const r = await fetch('/api/apps', { cache: 'no-store' });
if (!r.ok) throw new Error('api');
const apps = (await r.json()).filter(a => a.ok !== false);
if (!apps.length) {
target.innerHTML = '<a class="app-tile" href="/apps">' +
'<span class="name">No apps yet</span>' +
'<span class="cta">Install your first app →</span></a>';
return;
}
target.innerHTML = apps.map(a => {
const icon = a.icon_svg || FALLBACK_ICON;
const { href, label, external } = primaryAction(a);
const tgt = external ? ' target="_blank" rel="noopener"' : '';
return `<a class="app-tile" href="${esc(href)}"${tgt}>
<div class="icon">${icon}</div>
<span class="name">${esc(a.display_name || a.name)}</span>
<span class="cta">${esc(label)}</span>
</a>`;
}).join('');
} catch (e) {
target.innerHTML = '<a class="app-tile" href="/apps">' +
'<span class="name">Manage apps</span>' +
'<span class="cta">Open →</span></a>';
}
}
async function refresh() {
try {
const r = await fetch('/status.json', { cache: 'no-store' });
if (!r.ok) return;
const s = await r.json();
document.getElementById('uptime').textContent = s.uptime || '—';
document.getElementById('docker').textContent = s.docker_version || '—';
document.getElementById('disk').textContent = s.disk_free || '—';
document.getElementById('updated').textContent = s.updated_at || '—';
} catch (e) {
/* next tick will retry */
}
}
// furtka.json must land first so renderApps can build the SMB link
// with the real hostname. If it 404s (very early in boot) the
// primary-action falls back to "Manage →".
loadFurtkaJson().then(renderApps);
refresh();
setInterval(refresh, 15000);
</script>
</body>
</html>

View file

@ -1,447 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Settings · Furtka</title>
<meta name="viewport" content="width=device-width,initial-scale=1">
<link rel="stylesheet" href="/style.css">
</head>
<body>
<main class="wrap">
<nav class="nav">
<a class="brand" href="/">Furtka</a>
<div class="nav-links">
<a href="/">Home</a>
<a href="/apps">Apps</a>
<a href="/settings/" aria-current="page">Settings</a>
<a href="#" id="logout-link" onclick="return doLogout(event)">Logout</a>
</div>
</nav>
<h1>Settings</h1>
<p class="lede">What this box knows about itself.</p>
<section>
<h2>About this box</h2>
<div class="card">
<dl class="kv">
<dt>Hostname</dt><dd id="set-hostname"></dd>
<dt>IP address</dt><dd id="set-ip"></dd>
<dt>Furtka version</dt><dd id="set-version"></dd>
<dt>Kernel</dt><dd id="set-kernel"></dd>
<dt>RAM</dt><dd id="set-ram"></dd>
<dt>Docker</dt><dd id="set-docker"></dd>
<dt>Uptime</dt><dd id="set-uptime"></dd>
</dl>
</div>
</section>
<section>
<h2>Furtka updates</h2>
<div class="card">
<dl class="kv">
<dt>Installed</dt><dd id="upd-current"></dd>
<dt>Latest available</dt><dd id="upd-latest"></dd>
</dl>
<div class="update-actions">
<button id="check-updates-btn" class="secondary">Check for updates</button>
<button id="apply-update-btn" hidden>Update now</button>
</div>
<p id="update-status" class="hint"></p>
</div>
</section>
<section>
<h2>Local HTTPS</h2>
<div class="card">
<p class="lede">
Serve this box over <code>https://<span id="https-host"></span>/</code>
with a green padlock. Install the Furtka root CA once per device, then
optionally force every HTTP request to redirect.
</p>
<dl class="kv">
<dt>CA fingerprint (SHA-256)</dt><dd id="https-fingerprint"></dd>
<dt>Reachable from this browser</dt><dd id="https-reachable">checking…</dd>
</dl>
<div class="update-actions">
<button id="https-download-btn" class="secondary">Download CA (.crt)</button>
<a href="/https-install/" class="inline-link">Per-OS install guide</a>
</div>
<label class="https-toggle" hidden id="https-force-wrap">
<input type="checkbox" id="https-force">
<span>Force HTTPS (redirect plain HTTP to HTTPS)</span>
</label>
<p class="hint" id="https-force-hint" hidden>
Enable this only after you've installed the CA and confirmed
<code>https://</code> works in this browser — otherwise the redirect
will leave you with a scary certificate warning.
</p>
<p id="https-status" class="hint"></p>
</div>
</section>
<section>
<h2>Appearance</h2>
<div class="card">
<dl class="kv">
<dt>Theme</dt><dd>Follows your system setting</dd>
<dt>Language</dt><dd>English</dd>
</dl>
</div>
</section>
<section>
<h2>Power</h2>
<div class="card">
<p class="lede">
Reboot or shut down the whole Furtka box. Takes a few seconds to
finish; the UI will reconnect itself after a reboot.
</p>
<div class="power-actions">
<button type="button" id="power-reboot" class="secondary">Reboot</button>
<button type="button" id="power-poweroff" class="danger">Shut down</button>
</div>
<p id="power-status" class="hint"></p>
</div>
</section>
<section>
<h2>Coming next</h2>
<div class="coming">
<p class="hint">Controls we're building — follow progress on <a href="https://furtka.org">furtka.org</a>.</p>
<a href="https://furtka.org/#planned">Change hostname</a>
<a href="https://furtka.org/#planned">Backup</a>
<a href="https://furtka.org/#planned">User accounts</a>
<a href="https://furtka.org/#planned">Remote access</a>
</div>
</section>
<footer>
<p>Furtka · <a href="https://furtka.org">furtka.org</a></p>
</footer>
</main>
<script>
// Logout button in the nav — same shape as /apps and / pages.
async function doLogout(ev) {
ev.preventDefault();
try { await fetch('/logout', { method: 'POST', credentials: 'same-origin' }); }
catch (e) { /* server may already be down */ }
window.location.href = '/login';
return false;
}
async function refresh() {
try {
const r = await fetch('/status.json', { cache: 'no-store' });
if (!r.ok) return;
const s = await r.json();
document.getElementById('set-hostname').textContent = s.hostname || '—';
document.getElementById('set-ip').textContent = s.ip_primary || '—';
document.getElementById('set-version').textContent = s.furtka_version || '—';
document.getElementById('set-kernel').textContent = s.kernel || '—';
document.getElementById('set-ram').textContent = s.ram_total || '—';
document.getElementById('set-docker').textContent = s.docker_version || '—';
document.getElementById('set-uptime').textContent = s.uptime || '—';
document.getElementById('upd-current').textContent = s.furtka_version || '—';
} catch (e) {
/* next tick will retry */
}
}
refresh();
setInterval(refresh, 15000);
// --- Furtka updates -----------------------------------------------
const STAGE_LABELS = {
downloading: 'Downloading release…',
verifying: 'Verifying signature…',
extracting: 'Unpacking update…',
swapping: 'Switching to new version…',
restarting: 'Restarting services…',
done: 'Update complete — reloading…',
rolled_back: 'Update failed, rolled back to the previous version',
rolled_back_manual: 'Rolled back manually',
};
let pollHandle = null;
let fallbackReloadHandle = null;
const statusEl = document.getElementById('update-status');
const checkBtn = document.getElementById('check-updates-btn');
const applyBtn = document.getElementById('apply-update-btn');
function setStatus(msg, isError = false) {
statusEl.textContent = msg;
statusEl.style.color = isError ? 'var(--danger)' : 'var(--muted)';
}
checkBtn.addEventListener('click', async () => {
checkBtn.disabled = true;
const original = checkBtn.textContent;
checkBtn.textContent = 'Checking…';
setStatus('');
try {
const r = await fetch('/api/furtka/update/check', { method: 'POST' });
const data = await r.json();
if (!r.ok) {
setStatus(data.error || `HTTP ${r.status}`, true);
return;
}
document.getElementById('upd-latest').textContent = data.latest || '—';
document.getElementById('upd-current').textContent = data.current || '—';
if (data.update_available) {
applyBtn.hidden = false;
applyBtn.textContent = `Update to ${data.latest}`;
setStatus(`Update available: ${data.current} → ${data.latest}`);
} else {
applyBtn.hidden = true;
setStatus('Already up to date');
}
} catch (e) {
setStatus(`Network error: ${e.message}`, true);
} finally {
checkBtn.disabled = false;
checkBtn.textContent = original;
}
});
applyBtn.addEventListener('click', async () => {
applyBtn.disabled = true;
checkBtn.disabled = true;
setStatus('Starting update…');
try {
const r = await fetch('/api/furtka/update/apply', { method: 'POST' });
const data = await r.json();
if (r.status === 409) {
setStatus('Another update is already running — watching it', true);
} else if (!r.ok) {
setStatus(data.error || `HTTP ${r.status}`, true);
applyBtn.disabled = false;
checkBtn.disabled = false;
return;
}
// Poll /update-state.json (served by Caddy, unaffected by the
// API restart the updater is about to trigger) every 2s.
pollHandle = setInterval(pollUpdateState, 2000);
// Fallback: reload regardless of whether polling observes 'done'.
// The mid-apply API restart can drop the poll connection before
// the terminal state is ever seen by this page.
fallbackReloadHandle = setTimeout(() => location.reload(), 45000);
} catch (e) {
setStatus(`Network error: ${e.message}`, true);
applyBtn.disabled = false;
checkBtn.disabled = false;
}
});
// --- Local HTTPS --------------------------------------------------
const httpsFingerprintEl = document.getElementById('https-fingerprint');
const httpsReachableEl = document.getElementById('https-reachable');
const httpsHostEl = document.getElementById('https-host');
const httpsDownloadBtn = document.getElementById('https-download-btn');
const httpsForceWrap = document.getElementById('https-force-wrap');
const httpsForceHint = document.getElementById('https-force-hint');
const httpsForce = document.getElementById('https-force');
const httpsStatusEl = document.getElementById('https-status');
httpsHostEl.textContent = location.hostname;
httpsDownloadBtn.addEventListener('click', () => {
// Use an anchor with the download attr so the browser treats
// the cert as a download rather than rendering it.
const a = document.createElement('a');
a.href = '/rootCA.crt';
a.download = 'furtka-local-rootCA.crt';
document.body.appendChild(a);
a.click();
a.remove();
});
async function refreshHttpsStatus() {
try {
const r = await fetch('/api/furtka/https/status', { cache: 'no-store' });
if (!r.ok) return;
const s = await r.json();
httpsFingerprintEl.textContent = s.fingerprint_sha256 || 'waiting for Caddy…';
httpsDownloadBtn.disabled = !s.ca_available;
httpsForce.checked = !!s.force_https;
updateForceToggleVisibility(s);
} catch (e) {
/* next refresh will retry */
}
}
async function probeHttpsReachable() {
if (location.protocol === 'https:') {
httpsReachableEl.textContent = 'yes — you are on HTTPS now';
return true;
}
try {
// no-cors: we don't need the response body, just whether the
// TLS handshake + fetch succeed. Browsers reject on untrusted
// cert with a TypeError, which is exactly the signal we want.
await fetch('https://' + location.hostname + '/furtka.json',
{ cache: 'no-store', mode: 'no-cors' });
httpsReachableEl.textContent = 'yes — CA already trusted';
return true;
} catch (e) {
httpsReachableEl.textContent = 'no — install the CA first';
return false;
}
}
let httpsReachableCache = false;
function updateForceToggleVisibility(status) {
// Show the force-redirect toggle only when both:
// - Caddy's CA exists (otherwise there's no HTTPS to redirect to)
// - the current browser already trusts the cert (otherwise the
// user would lock themselves out of this very page)
const show = status.ca_available && httpsReachableCache;
httpsForceWrap.hidden = !show;
httpsForceHint.hidden = !show;
}
httpsForce.addEventListener('change', async () => {
httpsForce.disabled = true;
const desired = httpsForce.checked;
httpsStatusEl.textContent = desired
? 'Enabling HTTP→HTTPS redirect…'
: 'Disabling HTTP→HTTPS redirect…';
httpsStatusEl.style.color = 'var(--muted)';
try {
const r = await fetch('/api/furtka/https/force', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ enabled: desired }),
});
const data = await r.json();
if (!r.ok) {
httpsStatusEl.textContent = data.error || `HTTP ${r.status}`;
httpsStatusEl.style.color = 'var(--danger)';
httpsForce.checked = !desired;
} else {
httpsStatusEl.textContent = data.force_https
? 'Redirect on — new HTTP requests will jump to HTTPS.'
: 'Redirect off — HTTP serves the content directly.';
}
} catch (e) {
httpsStatusEl.textContent = `Network error: ${e.message}`;
httpsStatusEl.style.color = 'var(--danger)';
httpsForce.checked = !desired;
} finally {
httpsForce.disabled = false;
}
});
(async () => {
httpsReachableCache = await probeHttpsReachable();
await refreshHttpsStatus();
})();
async function pollUpdateState() {
try {
const r = await fetch('/update-state.json', { cache: 'no-store' });
if (!r.ok) return;
const s = await r.json();
const label = STAGE_LABELS[s.stage] || `Stage: ${s.stage}`;
setStatus(label, s.stage === 'rolled_back');
if (s.stage === 'done') {
clearInterval(pollHandle);
clearTimeout(fallbackReloadHandle);
setTimeout(() => location.reload(), 5000);
} else if (s.stage === 'rolled_back') {
clearInterval(pollHandle);
clearTimeout(fallbackReloadHandle);
if (s.reason) {
setStatus(`${label} — ${s.reason}`, true);
}
applyBtn.disabled = false;
checkBtn.disabled = false;
}
} catch (e) {
/* keep polling; restart blip expected */
}
}
// Power buttons: confirm, POST, then swap the whole card into a
// "going down" state so the user doesn't keep clicking. After a
// reboot we try to reconnect after ~45s; for shutdown we just
// tell the user the box is off — no auto-reconnect attempt.
const powerStatusEl = document.getElementById('power-status');
const rebootBtn = document.getElementById('power-reboot');
const poweroffBtn = document.getElementById('power-poweroff');
function setPowerStatus(msg, tone = 'muted') {
powerStatusEl.textContent = msg;
powerStatusEl.style.color =
tone === 'error' ? 'var(--danger)' : 'var(--muted)';
}
// Ask the user to confirm, then POST the power action to the box.
// Both power buttons stay disabled while the request is in flight.
// For a reboot we schedule the reconnect poll; for a poweroff we
// only explain that the physical button is the way back. On an HTTP
// error or a network failure the buttons are re-enabled for a retry.
async function triggerPower(action, confirmMsg, inflightLabel) {
  if (!confirm(confirmMsg)) return;
  const setButtonsDisabled = (disabled) => {
    rebootBtn.disabled = disabled;
    poweroffBtn.disabled = disabled;
  };
  setButtonsDisabled(true);
  setPowerStatus(inflightLabel);
  try {
    const resp = await fetch('/api/furtka/power', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ action }),
    });
    if (!resp.ok) {
      // Prefer the server's error message; fall back to the status
      // code when the body isn't JSON.
      const payload = await resp.json().catch(() => ({}));
      setPowerStatus(payload.error || `HTTP ${resp.status}`, 'error');
      setButtonsDisabled(false);
      return;
    }
    if (action === 'reboot') {
      setPowerStatus('Rebooting… this page will reload when the box is back.');
      // Boot + services typically take 30–45 s; wait 30 s before the
      // first poke so we don't just spin against a down kernel.
      setTimeout(pollForReconnect, 30000);
    } else {
      setPowerStatus(
        'Shutdown scheduled. Press the physical power button to turn it back on.'
      );
    }
  } catch (err) {
    setPowerStatus(`Network error: ${err.message}`, 'error');
    setButtonsDisabled(false);
  }
}
// Probe a tiny static file until it answers 200 — that means the box
// is back up, so announce it and reload. Any non-200 or network error
// means "still down"; retry every 3 s.
async function pollForReconnect() {
  let isUp = false;
  try {
    const resp = await fetch('/furtka.json', { cache: 'no-store' });
    isUp = resp.ok;
  } catch (err) {
    /* still down */
  }
  if (isUp) {
    setPowerStatus('Back up — reloading…');
    setTimeout(() => location.reload(), 1500);
  } else {
    setTimeout(pollForReconnect, 3000);
  }
}
// Wire the two power buttons to triggerPower. The confirm() prompts
// are German user-facing strings, while the in-flight status labels
// are English — NOTE(review): mixed locale, confirm this is intended.
rebootBtn.addEventListener('click', () =>
triggerPower(
'reboot',
"Wirklich neu starten? Die Box ist für ~30 Sekunden nicht erreichbar.",
'Rebooting…'
)
);
poweroffBtn.addEventListener('click', () =>
triggerPower(
'poweroff',
"Wirklich ausschalten? Du kannst die Box erst wieder starten, wenn du den physischen Power-Knopf drückst.",
'Shutting down…'
)
);
</script>
</body>
</html>

View file

@ -1,11 +0,0 @@
{
"hostname": "",
"uptime": "starting…",
"docker_version": "starting…",
"disk_free": "starting…",
"ip_primary": "",
"kernel": "",
"ram_total": "",
"furtka_version": "",
"updated_at": ""
}

View file

@ -1,482 +0,0 @@
/* Furtka on-box design system. Served by Caddy at /style.css,
consumed by the landing page AND the resource-manager /apps
page. One source of truth for tokens + components. */
:root {
--bg: #0f1115;
--fg: #e8eaed;
--muted: #9aa0a6;
--accent: #6ee7b7;
--accent-soft: rgba(110, 231, 183, 0.12);
--card: #1a1d24;
--card-hover: #222530;
--border: #2a2d34;
--warn: #4a3030;
--warn-fg: #fed;
--danger: #f08080;
--r-sm: 4px;
--r-md: 8px;
--r-lg: 12px;
--r-pill: 999px;
--shadow-card: 0 1px 2px rgba(0, 0, 0, 0.3);
--ring: 0 0 0 2px var(--accent);
}
@media (prefers-color-scheme: light) {
:root {
--bg: #f7f6f3;
--fg: #17181c;
--muted: #5e6066;
--accent: #0f8a5f;
--accent-soft: rgba(15, 138, 95, 0.12);
--card: #ffffff;
--card-hover: #f0efeb;
--border: #e3e1dc;
--warn: #fde2d3;
--warn-fg: #5a2a10;
--danger: #c03a28;
--shadow-card: 0 1px 3px rgba(0, 0, 0, 0.08);
}
}
* { box-sizing: border-box; }
body {
margin: 0;
font-family: system-ui, -apple-system, "Segoe UI", Roboto, sans-serif;
background: var(--bg);
color: var(--fg);
line-height: 1.5;
}
/* Shared page container both landing and /apps wrap content in
<main class="wrap"> so sizing + padding stay consistent. */
.wrap { max-width: 780px; margin: 0 auto; padding: 1.25rem 1.5rem 3rem; }
/* Top nav — persistent across pages (Jakob's Law). */
.nav {
display: flex;
align-items: center;
justify-content: space-between;
padding-bottom: 1.25rem;
border-bottom: 1px solid var(--border);
margin-bottom: 2rem;
}
.brand {
font-weight: 700;
letter-spacing: 0.02em;
color: var(--fg);
text-decoration: none;
font-size: 1.05rem;
display: inline-flex;
align-items: center;
gap: 0.55rem;
}
.brand::before {
content: "";
width: 0.7rem;
height: 0.7rem;
background: var(--accent);
border-radius: 2px;
transform: rotate(45deg);
}
.nav-links { display: flex; gap: 0.25rem; }
.nav-links a {
color: var(--muted);
text-decoration: none;
font-size: 0.9rem;
padding: 0.35rem 0.75rem;
border-radius: var(--r-sm);
}
.nav-links a:hover { color: var(--fg); }
.nav-links a[aria-current="page"] {
color: var(--fg);
background: var(--accent-soft);
}
/* -- Landing page ---------------------------------------------- */
header h1 { margin: 0 0 0.5rem; font-size: 2.5rem; }
.lead { font-size: 1.25rem; color: var(--muted); margin: 0 0 0.25rem; }
.host { color: var(--muted); margin: 0 0 3rem; }
.host code {
background: var(--card);
padding: 0.15rem 0.5rem;
border-radius: var(--r-sm);
color: var(--accent);
}
section h2 {
font-size: 1.1rem;
text-transform: uppercase;
letter-spacing: 0.1em;
color: var(--muted);
margin: 2rem 0 1rem;
}
.tiles {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(180px, 1fr));
gap: 1rem;
}
.tile {
background: var(--card);
padding: 1.25rem;
border-radius: var(--r-md);
display: flex;
flex-direction: column;
}
.tile .label {
font-size: 0.8rem;
color: var(--muted);
text-transform: uppercase;
letter-spacing: 0.08em;
}
.tile .value { font-size: 1.25rem; margin-top: 0.5rem; }
.updated { font-size: 0.85rem; color: var(--muted); margin-top: 1rem; }
.soon {
background: var(--card);
padding: 1.5rem;
border-radius: var(--r-md);
margin-top: 2rem;
}
footer {
margin-top: 4rem;
padding-top: 1.5rem;
border-top: 1px solid var(--border);
color: var(--muted);
font-size: 0.9rem;
}
footer a { color: var(--accent); }
/* -- Apps page ------------------------------------------------- */
h1 { font-size: 2rem; margin: 0; }
h2 {
font-size: 1rem;
text-transform: uppercase;
letter-spacing: 0.1em;
color: var(--muted);
margin: 2rem 0 0.75rem;
}
.lede { color: var(--muted); margin: 0.25rem 0 1rem; }
.warn {
background: var(--warn);
padding: 1rem;
border-radius: var(--r-md);
margin: 1.5rem 0;
color: var(--warn-fg);
font-size: 0.9rem;
}
.app {
background: var(--card);
padding: 1rem;
border-radius: var(--r-md);
margin: 0.5rem 0;
display: flex;
justify-content: space-between;
align-items: center;
gap: 1rem;
box-shadow: var(--shadow-card);
}
.app .left {
display: flex;
align-items: center;
gap: 1rem;
min-width: 0;
flex: 1;
}
.meta { display: flex; flex-direction: column; min-width: 0; }
.name { font-weight: 600; font-size: 1.05rem; }
.name small { color: var(--muted); font-weight: 400; margin-left: 0.5rem; }
.desc {
color: var(--muted);
font-size: 0.9rem;
overflow: hidden;
text-overflow: ellipsis;
}
.buttons {
display: flex;
gap: 0.5rem;
flex-wrap: wrap;
justify-content: flex-end;
}
button, .btn {
background: var(--accent);
border: none;
color: var(--bg);
font-weight: 600;
padding: 0.5rem 1rem;
border-radius: var(--r-sm);
cursor: pointer;
white-space: nowrap;
font-size: 0.9rem;
font-family: inherit;
/* Anchor rendered-as-button: strip underline + keep the button's
rectangular hit area. `display: inline-flex` so an <a class="btn">
lines up vertically with its <button> siblings in .buttons. */
text-decoration: none;
display: inline-flex;
align-items: center;
}
button.secondary, .btn.secondary {
background: var(--card);
color: var(--fg);
border: 1px solid var(--border);
}
button.danger { background: var(--danger); color: #fff; }
button:disabled { opacity: 0.5; cursor: wait; }
button:focus-visible, .btn:focus-visible { outline: none; box-shadow: var(--ring); }
.empty { color: var(--muted); font-style: italic; padding: 0.5rem 0; }
.catalog-row {
display: flex;
justify-content: space-between;
align-items: center;
flex-wrap: wrap;
gap: 0.75rem;
padding: 0.5rem 0 0.75rem;
}
.catalog-state {
margin: 0;
color: var(--muted);
font-size: 0.9rem;
}
.catalog-stage.pending {
color: var(--fg);
font-style: italic;
}
pre {
background: var(--card);
padding: 1rem;
border-radius: var(--r-md);
overflow-x: auto;
font-size: 0.85rem;
white-space: pre-wrap;
word-wrap: break-word;
}
details.log-details {
margin-top: 0.25rem;
}
details.log-details > summary {
cursor: pointer;
color: var(--muted);
font-size: 0.9rem;
padding: 0.25rem 0;
user-select: none;
}
details.log-details[open] > summary { color: var(--fg); }
/* Modal */
.modal-backdrop {
position: fixed;
inset: 0;
background: rgba(0, 0, 0, 0.6);
display: none;
align-items: flex-start;
justify-content: center;
padding: 2rem 1rem;
overflow-y: auto;
z-index: 10;
}
.modal-backdrop.open { display: flex; }
.modal {
background: var(--card);
border-radius: var(--r-md);
padding: 1.5rem;
max-width: 520px;
width: 100%;
}
.modal h3 { margin: 0 0 0.5rem; font-size: 1.3rem; }
.modal .long {
color: var(--muted);
font-size: 0.9rem;
margin-bottom: 1.25rem;
white-space: pre-wrap;
}
.field { margin-bottom: 1rem; }
.field label {
display: block;
font-weight: 600;
margin-bottom: 0.25rem;
font-size: 0.95rem;
}
.field .hint { color: var(--muted); font-size: 0.85rem; margin-bottom: 0.35rem; }
.field input {
width: 100%;
background: var(--bg);
color: var(--fg);
border: 1px solid var(--border);
border-radius: var(--r-sm);
padding: 0.5rem 0.6rem;
font-size: 0.95rem;
font-family: inherit;
}
.field input:focus { outline: 2px solid var(--accent); outline-offset: -1px; }
.field .req { color: var(--danger); margin-left: 0.25rem; }
.modal .error,
.login-wrap .error {
background: var(--warn);
color: var(--warn-fg);
padding: 0.5rem 0.75rem;
border-radius: var(--r-sm);
margin-bottom: 1rem;
font-size: 0.9rem;
display: none;
}
.modal .error.show,
.login-wrap .error.show { display: block; }
/* Login + first-run setup page. Shares .wrap's max-width so the form
sits in the same column the rest of the app uses, just without the
Home/Apps/Settings nav. A bit of top padding so the H1 isn't glued
to the viewport edge. */
.login-wrap { padding-top: 3rem; }
.login-wrap .actions { margin-top: 0.5rem; }
.modal-actions {
display: flex;
justify-content: flex-end;
gap: 0.5rem;
margin-top: 0.5rem;
}
/* Row of buttons beneath a card used by the Furtka updates card on
/settings. Left-aligned, wraps on narrow screens. */
.update-actions,
.power-actions {
display: flex;
gap: 0.5rem;
flex-wrap: wrap;
margin-top: 1rem;
align-items: center;
}
/* Inline link rendered alongside a button (e.g. next to "Download CA"
   on /settings). No button chrome — just accent colour + underline on
hover so the distinction between primary action and secondary
resource stays visually clear. */
.inline-link {
color: var(--accent);
text-decoration: none;
font-size: 0.9rem;
}
.inline-link:hover { text-decoration: underline; }
/* Checkbox + label row for the /settings HTTPS-force toggle. */
.https-toggle {
display: flex;
align-items: center;
gap: 0.55rem;
margin-top: 1rem;
font-size: 0.95rem;
cursor: pointer;
}
.https-toggle input { cursor: pointer; }
/* -- Shared primitives for later slices ------------------------ */
.chip {
display: inline-block;
background: var(--card);
color: var(--accent);
padding: 0.15rem 0.6rem;
border-radius: var(--r-pill);
font-size: 0.8rem;
font-family: ui-monospace, SFMono-Regular, Menlo, monospace;
}
.chip-muted { color: var(--muted); }
.card {
background: var(--card);
padding: 1.25rem;
border-radius: var(--r-md);
box-shadow: var(--shadow-card);
}
.card + .card { margin-top: 1rem; }
.card h3 { margin: 0 0 0.75rem; font-size: 1.05rem; }
.kv {
display: grid;
grid-template-columns: max-content 1fr;
column-gap: 1.25rem;
row-gap: 0.4rem;
font-size: 0.95rem;
}
.kv dt { color: var(--muted); }
.kv dd {
margin: 0;
color: var(--fg);
font-family: ui-monospace, SFMono-Regular, Menlo, monospace;
/* Grid items default to min-width: auto (= content width), so a long
unbreakable value like a SHA-256 fingerprint would push past the
card. min-width: 0 lets the 1fr track enforce the column width, and
overflow-wrap: anywhere gives the colon-separated hex string valid
break opportunities. */
min-width: 0;
overflow-wrap: anywhere;
}
.coming {
display: flex;
flex-wrap: wrap;
gap: 0.5rem;
margin-top: 0.5rem;
}
.coming a {
color: var(--muted);
text-decoration: none;
padding: 0.3rem 0.8rem;
border-radius: var(--r-pill);
border: 1px solid var(--border);
font-size: 0.85rem;
}
.coming a:hover { color: var(--fg); border-color: var(--accent); }
.coming .hint {
color: var(--muted);
font-size: 0.85rem;
width: 100%;
margin: 0 0 0.25rem;
}
.grid-apps {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(150px, 1fr));
gap: 0.75rem;
}
.app-tile {
background: var(--bg);
border: 1px solid var(--border);
border-radius: var(--r-md);
padding: 1rem;
display: flex;
flex-direction: column;
gap: 0.5rem;
align-items: flex-start;
text-decoration: none;
color: var(--fg);
transition: border-color 120ms, background 120ms;
}
.app-tile:hover { border-color: var(--accent); background: var(--card-hover); }
.app-tile .icon {
width: 40px;
height: 40px;
color: var(--accent);
display: flex;
align-items: center;
justify-content: center;
}
.app-tile .icon svg { width: 100%; height: 100%; }
.app-tile .name { font-weight: 600; font-size: 0.95rem; }
.app-tile .cta { color: var(--accent); font-size: 0.85rem; }
/* Icon slot inside a /apps row. The app icon inherits currentColor
so a folder path rendered with fill="currentColor" picks up the
accent, while a nested <path> using stroke="var(--accent)" still
gets the brand color. */
.app-icon {
width: 56px;
height: 56px;
flex-shrink: 0;
background: var(--accent-soft);
border-radius: var(--r-md);
display: flex;
align-items: center;
justify-content: center;
color: var(--accent);
}
.app-icon svg { width: 36px; height: 36px; }

View file

@ -4,10 +4,10 @@
## TL;DR ## TL;DR
- **None of the three has a device-aware install wizard.** All three either drop you into a Debian-installer-style flow (YunoHost), a single-question TUI picker (Umbrel), or no OS install at all (CasaOS is a curl-to-bash overlay). Furtka's "boot USB → web wizard detects hardware → done" angle is genuinely unoccupied. - **None of the three has a device-aware install wizard.** All three either drop you into a Debian-installer-style flow (YunoHost), a single-question TUI picker (Umbrel), or no OS install at all (CasaOS is a curl-to-bash overlay). Homebase's "boot USB → web wizard detects hardware → done" angle is genuinely unoccupied.
- **HTTPS/reverse-proxy story is a disaster across the board.** Umbrel has refused HTTPS on the local UI for 4+ years ([issue #546](https://github.com/getumbrel/umbrel/issues/546), open since Feb 2021, still open). CasaOS ships plain HTTP and the web UI has root filesystem access. YunoHost is the only one that does Let's Encrypt properly — but only for its own domain wizard, and the DNS/email setup is where newbies quit. - **HTTPS/reverse-proxy story is a disaster across the board.** Umbrel has refused HTTPS on the local UI for 4+ years ([issue #546](https://github.com/getumbrel/umbrel/issues/546), open since Feb 2021, still open). CasaOS ships plain HTTP and the web UI has root filesystem access. YunoHost is the only one that does Let's Encrypt properly — but only for its own domain wizard, and the DNS/email setup is where newbies quit.
- **Umbrel's license (PolyForm Noncommercial) disqualifies it for anyone who cares about true open source.** This is the #1 complaint on r/selfhosted and the reason Citadel forked. Staying AGPL is a real differentiator against the slickest-looking competitor. - **Umbrel's license (PolyForm Noncommercial) disqualifies it for anyone who cares about true open source.** This is the #1 complaint on r/selfhosted and the reason Citadel forked. Staying AGPL is a real differentiator against the slickest-looking competitor.
- **Storage handling is the weakest dimension for all three.** Umbrel can't use multiple drives or change storage paths per app. CasaOS mounts externals read-only by default and requires editing files to fix. YunoHost only does partitioning in "expert mode" and never revisits it after install. Furtka's drive-scoring + boot/LVM auto-assignment idea has no real competition here. - **Storage handling is the weakest dimension for all three.** Umbrel can't use multiple drives or change storage paths per app. CasaOS mounts externals read-only by default and requires editing files to fix. YunoHost only does partitioning in "expert mode" and never revisits it after install. Homebase's drive-scoring + boot/LVM auto-assignment idea has no real competition here.
- **CasaOS is in maintenance mode.** IceWhale has moved focus to ZimaOS (paid hardware). Users on GitHub are asking whether the project is still alive ([Discussion #2386](https://github.com/IceWhaleTech/CasaOS/discussions/2386)). Momentum window is open. - **CasaOS is in maintenance mode.** IceWhale has moved focus to ZimaOS (paid hardware). Users on GitHub are asking whether the project is still alive ([Discussion #2386](https://github.com/IceWhaleTech/CasaOS/discussions/2386)). Momentum window is open.
--- ---
@ -146,17 +146,17 @@ This is the single dimension where YunoHost is best-in-class of the three.
--- ---
## Implications for Furtka ## Implications for Homebase
### Copy (these are proven patterns) ### Copy (these are proven patterns)
- **YunoHost's post-install web wizard model** — self-signed `https://proksi.local`, walk user through domain + admin user + cert, diagnostic that flags DNS/port issues in yellow/red. This is clearly the best onboarding of the three; mimicking it for first boot is a shortcut. - **YunoHost's post-install web wizard model** — self-signed `https://proksi.local`, walk user through domain + admin user + cert, diagnostic that flags DNS/port issues in yellow/red. This is clearly the best onboarding of the three; mimicking it for first boot is a shortcut.
- **YunoHost's SSO-wired app installs** — every app they package is automatically integrated into the bundled auth. If we commit to one identity provider (Authentik is the homelab default) and wire every app template to it, we beat Umbrel and CasaOS on day one. - **YunoHost's SSO-wired app installs** — every app they package is automatically integrated into the bundled auth. If we commit to one identity provider (Authentik is the homelab default) and wire every app template to it, we beat Umbrel and CasaOS on day one.
- **Umbrel's app-store polish** — the grid, the screenshots, the categories, the one-click defaults. Visually they nailed it. Copy the UX, not the license. - **Umbrel's app-store polish** — the grid, the screenshots, the categories, the one-click defaults. Visually they nailed it. Copy the UX, not the license.
- **CasaOS's "works on any Linux" optionality** — useful fallback. Our primary story is a full OS, but having a "Furtka layer" installable on an existing Debian/Arch system would widen the funnel dramatically. - **CasaOS's "works on any Linux" optionality** — useful fallback. Our primary story is a full OS, but having a "Homebase layer" installable on an existing Debian/Arch system would widen the funnel dramatically.
### Avoid ### Avoid
- **Umbrel's license trap.** AGPL-3.0 (already our plan per README) is the exact counter-positioning. Every r/selfhosted thread about Umbrel mentions PolyForm. Lead with the license in marketing copy. - **Umbrel's license trap.** AGPL-3.0 (already our plan per README) is the exact counter-positioning. Every r/selfhosted thread about Umbrel mentions PolyForm. Lead with the license in marketing copy.
- **Umbrel's "we don't need HTTPS on LAN" stance.** It's 4+ years of public embarrassment. Furtka should ship HTTPS on `proksi.local` from day one — self-signed or via a local CA, doesn't matter, just not plaintext. - **Umbrel's "we don't need HTTPS on LAN" stance.** It's 4+ years of public embarrassment. Homebase should ship HTTPS on `proksi.local` from day one — self-signed or via a local CA, doesn't matter, just not plaintext.
- **CasaOS's root-Docker + root-FS-via-web-UI security posture.** Non-negotiable: run Docker rootless or at least not expose the filesystem through the web UI. - **CasaOS's root-Docker + root-FS-via-web-UI security posture.** Non-negotiable: run Docker rootless or at least not expose the filesystem through the web UI.
- **YunoHost's bundled-LDAP lock-in.** Their "you must use our directory" rule is the top power-user complaint. Make auth pluggable from the start. - **YunoHost's bundled-LDAP lock-in.** Their "you must use our directory" rule is the top power-user complaint. Make auth pluggable from the start.
- **YunoHost's non-isolated apps.** Docker-per-app is already our plan; this is just confirmation. - **YunoHost's non-isolated apps.** Docker-per-app is already our plan; this is just confirmation.
@ -165,7 +165,7 @@ This is the single dimension where YunoHost is best-in-class of the three.
### Where our wizard + gateway differentiation actually holds up ### Where our wizard + gateway differentiation actually holds up
1. **Device-aware install wizard** — real gap. Umbrel asks "which number is your drive," YunoHost runs stock Debian partitioning, CasaOS doesn't install an OS at all. A wizard that enumerates drives, shows SMART health, benchmarks speeds, and proposes "fast drive = OS/apps, big drive = data, HDD pool = backups" has no equivalent. This is the single most defensible pitch. 1. **Device-aware install wizard** — real gap. Umbrel asks "which number is your drive," YunoHost runs stock Debian partitioning, CasaOS doesn't install an OS at all. A wizard that enumerates drives, shows SMART health, benchmarks speeds, and proposes "fast drive = OS/apps, big drive = data, HDD pool = backups" has no equivalent. This is the single most defensible pitch.
2. **Managed gateway** — real gap. Every competitor punts reverse proxy + Let's Encrypt + DNS to the user. Even YunoHost, which does SSL best, assumes you'll edit DNS at your registrar manually. A Furtka subscription that says "point your domain's nameservers at us, we handle the rest" is genuinely new in this space. The Proxmox-model subscription narrative is credible because there's no competition. 2. **Managed gateway** — real gap. Every competitor punts reverse proxy + Let's Encrypt + DNS to the user. Even YunoHost, which does SSL best, assumes you'll edit DNS at your registrar manually. A Homebase subscription that says "point your domain's nameservers at us, we handle the rest" is genuinely new in this space. The Proxmox-model subscription narrative is credible because there's no competition.
3. **Arch rolling base** — mild gap. All three competitors are Debian. Faster updates are real, but this is a technical differentiator that end users won't perceive. Keep it as a reason for the tech crowd to cheerlead, not a primary pitch. 3. **Arch rolling base** — mild gap. All three competitors are Debian. Faster updates are real, but this is a technical differentiator that end users won't perceive. Keep it as a reason for the tech crowd to cheerlead, not a primary pitch.
### Where the pitch is weaker than we think ### Where the pitch is weaker than we think

View file

@ -79,4 +79,4 @@ Navigation: **Back** button, **Next** button (repeats for each device)
- Auto setup reduces decisions for beginners - Auto setup reduces decisions for beginners
- "I don't know" defaults keep things safe for non-technical users - "I don't know" defaults keep things safe for non-technical users
- Installer UI labeled **Proksi UX** in Robert's sketches - Installer UI labeled **Proksi UX** in Robert's sketches
- Project name: **Furtka** (Polish for "gate"), domain `furtka.org`. Installer UI continues under the **Proksi** name. - Project working titles: **Homebase** (repo name), **Furtka / FurtkaOS** (Robert's codename — Polish for "gate")

View file

@ -1,156 +0,0 @@
# Resource Manager
The layer between Furtka apps and the underlying system (disk, Docker, network). Apps don't touch Docker or the filesystem directly — they declare what they need in a manifest and the Resource Manager provisions, runs, and tracks them.
**Status:** v1 shipped 2026-04-15 in commits `cfc4c0b..c6ed7a8`, validated end-to-end on Proxmox VM same day (install → Web UI → fileshare install → SMB client → reboot-persistence all green). Commit `61c7ee2` adds the in-browser settings form and `description_long` so users no longer need SSH to configure an app. First consumer is the LAN fileshare app at `apps/fileshare/`. Web UI at `http://<host>.local/apps`.
For the conversation that produced these decisions and the Q&A live in the chat, see `~/.claude/plans/stateful-juggling-pike.md`.
---
## Anatomy of an app
Every Furtka app is a directory containing exactly:
```
manifest.json # required — the contract
docker-compose.yaml # required — container lifecycle
.env.example # optional — bootstraps .env on first install
.env # optional — user-edited secrets (preserved across upgrades)
icon.svg # optional but referenced in the manifest
```
The directory **name** is the app name. The manifest's `name` field must match it once installed (the scanner enforces this). When you install from an arbitrary source path the manifest's name decides where it lands — so `furtka app install /tmp/some-fork/` works regardless of what the source folder is called.
### Manifest schema
JSON. Fields marked *required* are mandatory; *optional* ones default as noted.
```json
{
"name": "fileshare",
"display_name": "Network Files",
"version": "0.1.0",
"description": "SMB share for LAN devices",
"description_long": "Longer user-facing help shown above the settings form.",
"volumes": ["files"],
"ports": [445, 139],
"icon": "icon.svg",
"settings": [
{
"name": "SMB_USER",
"label": "Benutzername",
"description": "Der Name, mit dem sich Geräte am Share anmelden.",
"type": "text",
"default": "furtka",
"required": true
},
{
"name": "SMB_PASSWORD",
"label": "Passwort",
"description": "Mindestens 8 Zeichen. Wird nie angezeigt.",
"type": "password",
"required": true
}
]
}
```
Top-level fields:
- `name` *(required)* — machine id, must equal the install folder name.
- `display_name` *(required)* — shown in the UI.
- `version` *(required)* — free-form string (semver expected, not enforced).
- `description` *(required)* — one-line summary rendered on the card.
- `description_long` *(optional, default `""`)* — multi-sentence help text rendered above the settings form. Plain text, newlines preserved.
- `volumes` *(required)* — list of short names. Furtka creates each as `furtka_<app>_<vol>` (collision-free across apps). Compose files MUST reference the namespaced name as `external: true`.
- `ports` *(required)* — informational for the UI. Compose owns the actual port binding.
- `icon` *(required)* — relative path inside the app folder.
- `settings` *(optional, default `[]`)* — see next section.
### Settings schema
Each entry in `settings` declares one environment variable the user can fill in via the Web UI (or via `POST /api/apps/install` with a `settings` object). On install/edit the values are written to `.env` in the app folder.
- `name` *(required)* — env-var name. Must match `^[A-Z_][A-Z0-9_]*$` (UPPER_SNAKE_CASE). Duplicates rejected.
- `label` *(optional, defaults to `name`)* — human-readable label rendered as the form label.
- `description` *(optional, default `""`)* — one-sentence help text rendered under the label.
- `type` *(optional, default `"text"`)* — one of `"text"`, `"password"`, `"number"`. Controls the HTML input type AND whether the current value is masked on the settings GET endpoint (password values are never returned, only written).
- `required` *(optional, default `false`)* — checked against the *merged* `.env` after submit, so edit-mode can omit unchanged fields and still pass.
- `default` *(optional, default `null`)* — applied on first install if the user didn't submit the field. String-coerced if non-string.
**Merge semantics on edit:** the installed app's existing `.env` is the base, submitted settings overlay it. Omit a field and its current value is preserved; submit `""` and it's cleared (which triggers `required` rejection).
**Placeholder rejection still applies:** if a final `.env` value matches `furtka.installer.PLACEHOLDER_SECRETS` (currently `{"changeme"}`), install fails with `InstallError` — so apps shipping an `.env.example` with `changeme` stay safe even if the user skips the form.
---
## Lifecycle
### Discovery: boot-scan
`furtka-reconcile.service` (oneshot, after `docker.service`) runs `furtka reconcile` at every boot. The reconciler walks `/var/lib/furtka/apps/*`, validates each manifest, ensures every declared volume exists, then `docker compose up -d` per app. Filesystem is the only source of truth — no separate index, no DB.
A failed reconcile of one app does not abort the others. The CLI exits non-zero if any app errored, so systemd marks the unit red, but the healthy apps still come up.
### Install
- `furtka app install <path>` — install from a local folder.
- `furtka app install <name>` — falls back to `/opt/furtka/apps/<name>/` (apps bundled with the ISO).
- Web UI: click Install on a card under "Available to install".
The installer copies files into `/var/lib/furtka/apps/<name>/`, preserves any existing user `.env`, bootstraps `.env` from `.env.example` on first install, and `chmod 0600` on `.env`.
**Placeholder secrets are refused.** If `.env` ends up containing values listed in `furtka.installer.PLACEHOLDER_SECRETS` (currently `{"changeme"}`), install raises `InstallError` and the reconciler is not run. Files are left in place so the user can vim the `.env` and re-run install.
### Remove
- `furtka app remove <name>` — `docker compose down`, then delete the app folder.
- Web UI: Remove button.
**Volumes are NEVER deleted.** Reinstall recovers the data. Manual `docker volume rm furtka_<app>_<vol>` if you really want to wipe.
---
## Backend
`furtka/api.py` runs as `furtka-api.service` — Python stdlib `http.server` (no Flask), bound to `127.0.0.1:7000`. Caddy reverse-proxies `/api/*` and `/apps*` from `:80`.
Endpoints:
- `GET /` and `/apps` — self-contained HTML UI.
- `GET /api/apps` — installed apps as JSON (each includes `has_settings` so the UI can show the "Settings" button only when relevant).
- `GET /api/bundled` — apps available in `/opt/furtka/apps/` that aren't installed.
- `GET /api/apps/<name>/settings` — returns the manifest's settings alongside current `.env` values. Works for both installed and bundled apps. Password values are returned as empty strings.
- `POST /api/apps/<name>/settings` `{"settings": {...}}` — merges values into the installed app's `.env` and reconciles. Only for already-installed apps.
- `POST /api/apps/install` `{"name": "...", "settings": {...}}` — install/reinstall. `settings` is optional; if present the `.env` is written from it before the placeholder check.
- `POST /api/apps/remove` `{"name": "..."}` — remove (folder, not volume).
The UI has **no authentication**. It shouts the warning at the top. Authentik integration is the proper fix later.
---
## Out of scope for v1
These are deliberate omissions, not forgotten work. Adding any of them is a discussed design change.
- SQL database — filesystem is authoritative, full stop.
- Volume sharing between apps (would be the first DB use case).
- Auth on the web UI.
- TLS on `.local` (separate problem — see commit history around mDNS for the reasoning).
- Catalog repo — `install <name>` only resolves bundled apps, no network catalog.
- Auto-updates of installed apps.
- Free-form `.env` editor — the settings form only exposes fields declared in the manifest. Non-manifest keys in `.env` are preserved on edit but not editable through the UI.
---
## Code map
| File | Purpose |
| --- | --- |
| `furtka/manifest.py` | JSON schema validation, `Manifest` dataclass, namespacing helper |
| `furtka/scanner.py` | Walks `/var/lib/furtka/apps/`, returns `ScanResult`s (broken manifests = error, not exception) |
| `furtka/reconciler.py` | Drives the per-app loop; isolates errors so one broken app doesn't block others |
| `furtka/installer.py` | Copy-from-source, `.env` bootstrap + 0600, placeholder rejection |
| `furtka/dockerops.py` | `docker volume` + `docker compose` subprocess wrappers |
| `furtka/api.py` | HTTP server + HTML UI |
| `furtka/cli.py` | `furtka app list/install/remove`, `furtka reconcile`, `furtka serve` |
| `apps/fileshare/` | First consumer — SMB share via `dperson/samba` |
| `iso/build.sh` | Tarballs `furtka/` + `apps/` into the live ISO at build time |
| `webinstaller/app.py` | `_resource_manager_commands()` + new systemd units (reconcile + api) for archinstall custom_commands |

View file

@ -1,12 +1,8 @@
# Forgejo Runner Setup # Forgejo Runner Setup
How to stand up a `forgejo-runner` so the CI workflows under How to stand up a `forgejo-runner` so the CI workflow in `.forgejo/workflows/ci.yml` actually executes on every push.
[`.forgejo/workflows/`](../.forgejo/workflows/) — `ci.yml` (lint,
pytest, JSON & link checks) and `build-iso.yml` (produces the live
ISO as a downloadable artifact) — run on every push to `main`.
Ready-to-use `compose.yml` and `config.yml` live in The runner is a long-running daemon that polls the Forgejo instance for queued jobs and runs them in Docker containers.
[`ops/forgejo-runner/`](../ops/forgejo-runner/).
## Choosing a host ## Choosing a host
@ -16,142 +12,102 @@ Ready-to-use `compose.yml` and `config.yml` live in
| **Home server / NAS** | Free; plenty of capacity | CI blocked if home network / power drops | | **Home server / NAS** | Free; plenty of capacity | CI blocked if home network / power drops |
| **Local dev machine** | Quick to set up, fast runs | CI only works while the machine is on | | **Local dev machine** | Quick to set up, fast runs | CI only works while the machine is on |
Recommendation: **home server or a cheap VPS**. Don't use a laptop that suspends. Recommendation for now: **home server or a cheap VPS**. Don't use a laptop that suspends.
## Architecture at a glance
The runner uses **docker-outside-of-docker (DooD)**: it mounts the host's
`/var/run/docker.sock` into itself and spawns job containers on the host
daemon. We went back and forth on this — the tempting alternative is a
docker-in-docker (DinD) sidecar for isolation — but DinD makes
`iso/build.sh` fail: `build.sh` does its own nested `docker run -v …` and
the path inside a DinD-hosted job isn't visible to host docker. DooD
trades some isolation for paths that line up everywhere. This runner VM
is single-purpose, so that trade is fine.
One non-obvious piece: the runner's default internal data directory is
`/data`. Host-mode jobs (see the `self-hosted:host` label below) tell
host docker to bind-mount `/data/.cache/act/…/hostexecutor` — which is
the container's filesystem path, not the host's. The fix is to make
`/data` exist on the host too, pointing at the same files, via a symlink:
```bash
sudo ln -s /home/<user>/forgejo-runner/data /data
```
This one line is what lets `-v /data/…:/work` resolve correctly.
## Install ## Install
On a fresh Ubuntu VM: Pick either the binary or the Docker container path. Docker is easier to upgrade.
```bash ### Path A: Docker Compose (recommended)
# Docker Engine + compose plugin (official repo)
./ops/forgejo-runner/bootstrap.sh
# Node.js on the HOST is not required — the runner container installs On the chosen host, create `~/forgejo-runner/compose.yml`:
# it inside itself on startup. But host tools help for debugging.
```yaml
services:
runner:
image: code.forgejo.org/forgejo/runner:6
container_name: forgejo-runner
restart: unless-stopped
environment:
- CONFIG_FILE=/data/config.yml
volumes:
- ./data:/data
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
- docker-in-docker
command: /bin/sh -c "sleep 5; forgejo-runner daemon"
docker-in-docker:
image: docker:dind
container_name: forgejo-runner-dind
restart: unless-stopped
privileged: true
environment:
- DOCKER_TLS_CERTDIR=
command: dockerd -H tcp://0.0.0.0:2375 --tls=false
``` ```
Copy the reference `compose.yml` and `config.yml` to `~/forgejo-runner/` ### Path B: Binary
and `~/forgejo-runner/data/` respectively. Create the `/data` symlink:
Download the latest release from https://code.forgejo.org/forgejo/runner/releases and drop it somewhere in `$PATH`:
```bash ```bash
mkdir -p ~/forgejo-runner/data wget https://code.forgejo.org/forgejo/runner/releases/download/v6.0.0/forgejo-runner-6.0.0-linux-amd64
cp ops/forgejo-runner/compose.yml ~/forgejo-runner/compose.yml chmod +x forgejo-runner-6.0.0-linux-amd64
cp ops/forgejo-runner/config.yml ~/forgejo-runner/data/config.yml sudo mv forgejo-runner-6.0.0-linux-amd64 /usr/local/bin/forgejo-runner
sudo ln -s "$HOME/forgejo-runner/data" /data
``` ```
## Register ## Register
1. In the Forgejo web UI: **Site Administration → Actions → Runners → 1. In the Forgejo web UI: go to **Site Administration → Actions → Runners → Create new Runner**. Copy the registration token. (For a repo-scoped runner instead, use **Repo Settings → Actions → Runners**.)
Create new Runner** (or **Repo Settings → Actions → Runners** for a
repo-scoped runner). Copy the registration token.
2. Register from the host by running the registration inside a one-shot 2. Register from the runner host:
container so the resulting `.runner` file lands in the mounted
`data/` directory:
```bash ```bash
cd ~/forgejo-runner
docker run --rm -v "$PWD/data:/data" code.forgejo.org/forgejo/runner:6 \
forgejo-runner register \ forgejo-runner register \
--instance https://forgejo.sourcegate.online \ --instance https://forgejo.sourcegate.online \
--token <TOKEN> \ --token <TOKEN> \
--name forge-runner-01 \ --name homebase-runner-1 \
--labels docker,ubuntu-latest,self-hosted \
--no-interactive --no-interactive
``` ```
Note: labels are configured in `config.yml`, not at registration This writes `.runner` (registration file) and `config.yml` into the current directory.
time — `config.yml` has `labels:` populated with the three we use
(`ubuntu-latest`, `docker`, `self-hosted`), each mapped to either
a container image or `:host` mode.
3. Start the daemon: `docker compose up -d`. 3. Start the daemon (Docker: `docker compose up -d`; binary: `forgejo-runner daemon`).
4. Verify in Forgejo admin → Actions → Runners that `forge-runner-01` 4. Verify the runner shows up as **Idle** in Forgejo's admin Runners page.
shows as **Idle**, and `docker logs forgejo-runner` prints
`runner: forge-runner-01, ..., declared successfully` along with
the installed `node` + `docker-cli` versions.
## Two runtime modes
The `config.yml` labels set up two job execution modes:
- **`ubuntu-latest` / `docker` → `docker://catthehacker/ubuntu:act-latest`.**
The standard mode. Jobs run in a fresh `catthehacker/ubuntu:act-latest`
container. Good isolation, standard GHA-compatible image. Used by
`ci.yml` (ruff, pytest, JSON & link checks).
- **`self-hosted` → `:host`.** Steps execute *directly* in the runner
container (no per-job wrapping container). Used by `build-iso.yml`
because `iso/build.sh` needs `docker run -v $REPO_ROOT:/work` to hit
a path host docker can resolve — wrapping in a job container
reintroduces the namespace mismatch.
Because host-mode jobs run inside the runner container, that container
needs tools the jobs invoke — Node (for JS-based actions like
`actions/checkout@v4`), Git (already in the base image), and the Docker
CLI (for `iso/build.sh`). The `command:` in `compose.yml` apk-installs
nodejs + docker-cli before launching the daemon, so those tools are
always present after container start.
## First CI run ## First CI run
Push a commit to `main` — the Actions tab should show: Push any commit; the Actions tab on the repo should show the workflow running. If nothing happens:
- `CI` workflow (`ci.yml`) running lint, tests, JSON validation, markdown - Confirm the runner is online (Forgejo admin → Actions → Runners).
links. Green in ~30 s. - Check the workflow has labels that match the runner (`runs-on: ubuntu-latest` needs a runner registered with that label).
- `Build ISO` workflow (`build-iso.yml`) running `iso/build.sh` inside - Check the runner logs: `docker logs forgejo-runner` or the systemd journal.
the runner container. Takes ~5 min (pacstrap + mkarchiso). The
resulting `.iso` lands as a `furtka-iso` artifact on the run page,
retained 14 days.
If the workflow queues forever, check: ## Systemd unit (for the binary path)
- Runner online in Forgejo admin. ```ini
- `docker logs forgejo-runner` for errors. [Unit]
- The workflow's `runs-on:` matches a label the runner advertises. Description=Forgejo Actions Runner
After=docker.service
Requires=docker.service
## Artifact compatibility note [Service]
ExecStart=/usr/local/bin/forgejo-runner daemon
WorkingDirectory=/var/lib/forgejo-runner
User=forgejo-runner
Restart=on-failure
Forgejo's Actions API is GHES-compatible (not full GHA), so use [Install]
`actions/upload-artifact@v3` — **v4+ fails with WantedBy=multi-user.target
`GHESNotSupportedError`** because it needs the newer `@actions/artifact` ```
protocol Forgejo hasn't implemented yet.
Save as `/etc/systemd/system/forgejo-runner.service`, then `sudo systemctl enable --now forgejo-runner`.
## Security notes ## Security notes
- DooD gives jobs full access to the host's docker daemon — they can - The runner mounts `/var/run/docker.sock` which gives it root-equivalent access to the host. Run it on a machine you trust and nothing sensitive.
spawn arbitrary containers, including `--privileged` ones. Keep the - Registration tokens are one-shot; a stolen token can't re-register after the runner is live.
runner VM dedicated to CI; don't run other user workloads on it. - Prefer repo-scoped runners over instance-wide if you're sharing the runner with other repos you don't control.
- The runner container itself runs as root (`user: "0:0"`). This is
acceptable because the whole VM is purpose-built, but it's a bigger
footgun than the standard non-root runner image default.
- Registration tokens are one-shot; once a runner is live, the token
can't re-register.
- Ubuntu's `systemd-resolved` stub resolver (`127.0.0.53`) sometimes
leaks LAN-only DNS servers that containers can't reach. If container
DNS fails, set explicit upstream DNS in `/etc/docker/daemon.json`
(e.g. `{"dns": ["1.1.1.1", "8.8.8.8"]}`) and restart docker.

View file

@ -1,106 +0,0 @@
# Smoke VM on Proxmox Test Host
Every push to `main` builds a fresh ISO (`build-iso.yml`) and then boots
it in a throwaway VM on the Proxmox test host — currently
`192.168.178.165` — to confirm the live ISO boots and the webinstaller
responds on `:5000`. If the smoke step fails, the ISO artifact is still
uploaded and the VM is left running for post-mortem.
The heavy lifting lives in [`scripts/smoke-vm.sh`](../scripts/smoke-vm.sh);
the workflow just downloads the artifact and shells out.
## Where smoke VMs live
- Node: whatever the test host reports as its node name (auto-detected)
- VMID range: `9000–9099` (`PVE_TEST_VMID_MIN` / `PVE_TEST_VMID_MAX`)
- Name: `furtka-smoke-<12-char-sha>`
- Tags: `furtka`, `smoke`, `sha-<12-char-sha>`
- MAC: `BC:24:11:<first-6-hex-of-sha>` (Proxmox's OUI; lets the runner
find the VM by scanning the LAN — the live ISO has no guest agent)
- ISO on test host: `local:iso/furtka-<short-sha>.iso`
Five most recent VMs (and their ISOs) are kept; anything older is stopped
and purged (`destroy-unreferenced-disks=1`) on the next run. Tune via
`PVE_TEST_KEEP`.
## Poking a failed smoke VM
1. Find it in the Proxmox WebUI — look for `furtka-smoke-<sha>` in the
9000-range. The VM is still running.
2. Console: **Console** tab in the WebUI (SPICE or noVNC). The webinstaller
logs to `journalctl -u furtka-webinstaller.service` on the live ISO.
3. SSH: the live Arch ISO ships `sshd` enabled with no root password.
Normally SSH as a LAN-reachable user is not possible without creds —
use the WebUI console instead. (The **installed** system, post-wizard,
has the `server` user with the password the wizard set.)
4. Fetch the short-sha from the VM name → cross-reference against
`git log` to see exactly which commit built the failing ISO.
## Running a smoke test locally
Needs LAN access to the test Proxmox and an API token with VM perms.
```bash
PVE_TEST_HOST=192.168.178.165 \
PVE_TEST_TOKEN='user@pve!smoke=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx' \
./scripts/smoke-vm.sh iso/out/furtka-*.iso
```
The script exits 0 on success, non-zero if the VM never served
`http://<ip>:5000`. Pruning runs either way.
## Clearing the 9000-range by hand
If smoke tests wedge or you want a clean slate:
```bash
# List smoke VMs
curl -sSk -H "Authorization: PVEAPIToken=${PVE_TEST_TOKEN}" \
https://192.168.178.165:8006/api2/json/nodes/<node>/qemu \
| python3 -c 'import json,sys; [print(v["vmid"],v["name"]) for v in json.load(sys.stdin)["data"] if 9000<=int(v["vmid"])<=9099]'
# Destroy one
curl -sSk -X POST -H "Authorization: PVEAPIToken=${PVE_TEST_TOKEN}" \
https://192.168.178.165:8006/api2/json/nodes/<node>/qemu/<vmid>/status/stop
curl -sSk -X DELETE -H "Authorization: PVEAPIToken=${PVE_TEST_TOKEN}" \
"https://192.168.178.165:8006/api2/json/nodes/<node>/qemu/<vmid>?purge=1&destroy-unreferenced-disks=1"
```
Or just run `scripts/smoke-vm.sh` with `PVE_TEST_KEEP=0` and any ISO —
the prune step will sweep everything in the range except the one it
just created.
## Proxmox API token setup (one-time)
1. WebUI → **Datacenter → Permissions → API Tokens → Add**
2. User: `root@pam` (or a dedicated `smoke@pve` user — see below)
3. Token ID: `smoke`
4. Uncheck **Privilege Separation** for the quick path, or keep it
separated and grant explicit perms below
5. Save the displayed secret once — it's shown only here
Minimum perms on `/` (if privilege-separated):
`VM.Allocate`, `VM.Config.Disk`, `VM.Config.CPU`, `VM.Config.Memory`,
`VM.Config.Network`, `VM.Config.Options`, `VM.Config.HWType`,
`VM.Config.CDROM`, `VM.PowerMgmt`, `VM.Audit`, `Datastore.AllocateTemplate`
(for ISO upload/delete on the `local` content store).
Set the result as Forgejo secret `PVE_TEST_TOKEN` in the format:
```
user@realm!tokenid=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
```
…and `PVE_TEST_HOST` as `192.168.178.165`. That's all the workflow needs.
## Assumptions
- Runner has L2 reachability to `192.168.178.0/24` (MAC→IP discovery
uses `arp-scan` from the runner).
- Test host uses default storage names: `local` for ISOs, `local-lvm` for
disks. Override via `PVE_TEST_ISO_STORAGE` / `PVE_TEST_DISK_STORAGE`.
- Bridge `vmbr0` carries LAN DHCP. Override via `PVE_TEST_BRIDGE`.
If any of those don't match, set the corresponding env var in
`build-iso.yml` (via `env:` on the smoke step) or override on the CLI
when running locally.

View file

@ -1,6 +1,6 @@
# Installer Wizard Flow # Installer Wizard Flow
End-to-end spec for the Furtka first-boot installer. Extends Robert's wireframes in [installer-wireframes.md](./installer-wireframes.md) with the post-install pattern proven by YunoHost (see [competitors.md](./competitors.md#yunohost)). Concrete tech picks are locked in at the bottom so Robert has unambiguous targets for the next coding session. End-to-end spec for the Homebase first-boot installer. Extends Robert's wireframes in [installer-wireframes.md](./installer-wireframes.md) with the post-install pattern proven by YunoHost (see [competitors.md](./competitors.md#yunohost)). Concrete tech picks are locked in at the bottom so Robert has unambiguous targets for the next coding session.
**Status:** Draft spec 2026-04-13. Not implemented yet. Open questions for Robert at the end. **Status:** Draft spec 2026-04-13. Not implemented yet. Open questions for Robert at the end.
@ -23,9 +23,9 @@ End-to-end spec for the Furtka first-boot installer. Extends Robert's wireframes
- mDNS advertising `proksi.local` - mDNS advertising `proksi.local`
- Local CA cert generated on first boot, served at `http://proksi.local/ca.crt` (plaintext port 80 serves **only** the CA cert download and a redirect to HTTPS — nothing else) - Local CA cert generated on first boot, served at `http://proksi.local/ca.crt` (plaintext port 80 serves **only** the CA cert download and a redirect to HTTPS — nothing else)
4. User opens `https://proksi.local` on any device on the same LAN. 4. User opens `https://proksi.local` on any device on the same LAN.
5. **First screen before S1:** browser warning page with a one-click "Download Furtka CA" button and OS-specific install instructions (macOS Keychain, Windows cert store, iOS profile, Android). After install, user clicks "Continue" and proceeds to S1 with no further warnings. 5. **First screen before S1:** browser warning page with a one-click "Download Homebase CA" button and OS-specific install instructions (macOS Keychain, Windows cert store, iOS profile, Android). After install, user clicks "Continue" and proceeds to S1 with no further warnings.
> **Why not self-signed (YunoHost model)?** Users learn to click through warnings, which kills trust. A single one-time CA install gives clean green padlocks for every Furtka service after. > **Why not self-signed (YunoHost model)?** Users learn to click through warnings, which kills trust. A single one-time CA install gives clean green padlocks for every Homebase service after.
--- ---
@ -68,7 +68,7 @@ From Robert's wireframe Screen 3. One screen per non-boot drive.
| Purpose | Action at install time | | Purpose | Action at install time |
|---------|------------------------| |---------|------------------------|
| Mass storage (default for large HDDs) | Added to a shared data pool mounted at `/mnt/data`. | | Mass storage (default for large HDDs) | Added to a shared data pool mounted at `/mnt/data`. |
| App storage (default for fast drives) | Mounted at `/var/lib/docker/furtka-apps`, used for container volumes. | | App storage (default for fast drives) | Mounted at `/var/lib/docker/homebase-apps`, used for container volumes. |
| Backup target | Formatted and scheduled in the backup service (deferred). | | Backup target | Formatted and scheduled in the backup service (deferred). |
| I don't know | Same as "Mass storage" — safe fallback. | | I don't know | Same as "Mass storage" — safe fallback. |
@ -90,8 +90,8 @@ The YunoHost-style domain picker. Three paths:
| Option | What it does | Who it's for | | Option | What it does | Who it's for |
|--------|--------------|--------------| |--------|--------------|--------------|
| **Free `*.furtka.cloud` subdomain** | User picks `myname.furtka.cloud`. DNS is ours, wildcard cert via DNS-01 issued automatically. Managed gateway path. | The "your dad" default. | | **Free `*.homebase.cloud` subdomain** | User picks `myname.homebase.cloud`. DNS is ours, wildcard cert via DNS-01 issued automatically. Managed gateway path. | The "your dad" default. |
| **Bring your own domain** | User enters e.g. `example.com`. Wizard shows two NS records (`ns1.furtka.cloud`, `ns2.furtka.cloud`) to paste at their registrar. We handle the rest. | Users who already have a domain. | | **Bring your own domain** | User enters e.g. `example.com`. Wizard shows two NS records (`ns1.homebase.cloud`, `ns2.homebase.cloud`) to paste at their registrar. We handle the rest. | Users who already have a domain. |
| **Skip — LAN only** | No public domain. Apps accessible as `<app>.proksi.local` via mDNS, with certs from the local CA. | Paranoid users / offline networks. | | **Skip — LAN only** | No public domain. Apps accessible as `<app>.proksi.local` via mDNS, with certs from the local CA. | Paranoid users / offline networks. |
Screen validates propagation in the background (NS query every 5s, green check when `nslookup` resolves to our NS). User can click "Next" past yellow, but not past red. Screen validates propagation in the background (NS query every 5s, green check when `nslookup` resolves to our NS). User can click "Next" past yellow, but not past red.
@ -102,7 +102,7 @@ Auto-configured based on S5.
| S5 choice | S6 behavior | | S5 choice | S6 behavior |
|-----------|-------------| |-----------|-------------|
| Free subdomain | Silent — wildcard cert already issued by our infra, Caddy picks it up. One line: "✅ TLS ready for `*.myname.furtka.cloud`." | | Free subdomain | Silent — wildcard cert already issued by our infra, Caddy picks it up. One line: "✅ TLS ready for `*.myname.homebase.cloud`." |
| BYO domain | Waits for NS propagation (can take minutes–hours). Page auto-refreshes. Shows "⏳ Waiting for `example.com` to point to us…" → green check. | | BYO domain | Waits for NS propagation (can take minutes–hours). Page auto-refreshes. Shows "⏳ Waiting for `example.com` to point to us…" → green check. |
| LAN only | Local CA cert used for `*.proksi.local`. One line: "✅ Local TLS ready." | | LAN only | Local CA cert used for `*.proksi.local`. One line: "✅ Local TLS ready." |
@ -127,8 +127,8 @@ YunoHost-style health check before confirming. Runs in parallel, shows results a
Shows the full `user_configuration.json` and `credentials.json` side by side (credentials masked, with a "show" toggle). User clicks **Install** → wizard POSTs to `/install/run`, which: Shows the full `user_configuration.json` and `credentials.json` side by side (credentials masked, with a "show" toggle). User clicks **Install** → wizard POSTs to `/install/run`, which:
1. Writes the two JSON files to `/tmp/furtka/`. 1. Writes the two JSON files to `/tmp/homebase/`.
2. Fires `archinstall --config /tmp/furtka/user_configuration.json --creds /tmp/furtka/user_credentials.json --silent`. 2. Fires `archinstall --config /tmp/homebase/user_configuration.json --creds /tmp/homebase/user_credentials.json --silent`.
3. Streams output to a log pane on-screen. 3. Streams output to a log pane on-screen.
4. On success: shows "🎉 Install complete. Remove USB and reboot. After reboot, log in at `https://<your-domain>`." 4. On success: shows "🎉 Install complete. Remove USB and reboot. After reboot, log in at `https://<your-domain>`."
@ -146,7 +146,7 @@ The wizard accumulates answers in the Flask `settings` dict at `webinstaller/app
| S2 `boot_drive` | `user_configuration.json` | `disk_config.device` | | S2 `boot_drive` | `user_configuration.json` | `disk_config.device` |
| S3 per-device mounts | `user_configuration.json` | `disk_config.additional_mounts` (new field; extend archinstall profile) | | S3 per-device mounts | `user_configuration.json` | `disk_config.additional_mounts` (new field; extend archinstall profile) |
| S4 `network_mode` | `user_configuration.json` | `network_config.type` | | S4 `network_mode` | `user_configuration.json` | `network_config.type` |
| S5 `domain` | — | Stored in Furtka state (`/etc/furtka/domain.conf`), read by Caddy + Authentik at first boot post-install | | S5 `domain` | — | Stored in Homebase state (`/etc/homebase/domain.conf`), read by Caddy + Authentik at first boot post-install |
| S5 `domain_mode` | — | Same | | S5 `domain_mode` | — | Same |
| `admin_username` | `user_credentials.json` | `users[0].username` | | `admin_username` | `user_credentials.json` | `users[0].username` |
| `admin_password` | `user_credentials.json` | `users[0].password` | | `admin_password` | `user_credentials.json` | `users[0].password` |
@ -154,7 +154,7 @@ The wizard accumulates answers in the Flask `settings` dict at `webinstaller/app
**New archinstall fields** (beyond what's in the current `archinstall/user_configuration.json`): **New archinstall fields** (beyond what's in the current `archinstall/user_configuration.json`):
- `disk_config.additional_mounts` — list of `{device, mountpoint, purpose}` — needs a custom archinstall profile script. - `disk_config.additional_mounts` — list of `{device, mountpoint, purpose}` — needs a custom archinstall profile script.
- Bootstrap script hook to deploy Caddy + Authentik + Furtka admin UI containers on first boot. - Bootstrap script hook to deploy Caddy + Authentik + Homebase admin UI containers on first boot.
--- ---
@ -164,7 +164,7 @@ The wizard accumulates answers in the Flask `settings` dict at `webinstaller/app
|----------|--------|-----|----------| |----------|--------|-----|----------|
| Reverse proxy | **Caddy** | Automatic Let's Encrypt built-in. Caddyfile is the simplest config of any reverse proxy — matches the "simple" ethos. Auto-reloads on config change. | Traefik (label-based config is elegant for Docker but overkill and Kubernetes-flavored), nginx (battle-tested but manual SSL = every competitor's failure mode). | | Reverse proxy | **Caddy** | Automatic Let's Encrypt built-in. Caddyfile is the simplest config of any reverse proxy — matches the "simple" ethos. Auto-reloads on config change. | Traefik (label-based config is elegant for Docker but overkill and Kubernetes-flavored), nginx (battle-tested but manual SSL = every competitor's failure mode). |
| Identity provider | **Authentik** | Bundled SSO from day one — every app template wires to it at install (YunoHost's best move). Active development, clean admin UI, OIDC + SAML + LDAP. | Authelia (lighter but worse UI and no built-in user management — needs external LDAP), external-only (loses the YunoHost wedge of SSO-by-default). | | Identity provider | **Authentik** | Bundled SSO from day one — every app template wires to it at install (YunoHost's best move). Active development, clean admin UI, OIDC + SAML + LDAP. | Authelia (lighter but worse UI and no built-in user management — needs external LDAP), external-only (loses the YunoHost wedge of SSO-by-default). |
| Managed gateway DNS | **NS delegation** to `ns1.furtka.cloud` / `ns2.furtka.cloud` | User delegates once at registrar; we handle wildcard cert via Let's Encrypt DNS-01, subdomain creation, propagation. The single biggest UX cliff every competitor dies on. | CNAME-per-subdomain (clunky, users see our hostnames in records), manual A records (the exact pain point we're solving). | | Managed gateway DNS | **NS delegation** to `ns1.homebase.cloud` / `ns2.homebase.cloud` | User delegates once at registrar; we handle wildcard cert via Let's Encrypt DNS-01, subdomain creation, propagation. The single biggest UX cliff every competitor dies on. | CNAME-per-subdomain (clunky, users see our hostnames in records), manual A records (the exact pain point we're solving). |
| Local HTTPS | **Local CA** generated at first boot | Single cert install in browser → green padlock for every service, no warnings ever. | Self-signed (YunoHost's model — users learn to click through warnings), public CA for `.proksi.local` (impossible — `.local` is reserved for mDNS), Tailscale-style ACME (good but adds a vendor dependency). | | Local HTTPS | **Local CA** generated at first boot | Single cert install in browser → green padlock for every service, no warnings ever. | Self-signed (YunoHost's model — users learn to click through warnings), public CA for `.proksi.local` (impossible — `.local` is reserved for mDNS), Tailscale-style ACME (good but adds a vendor dependency). |
| Base OS | **Arch** (confirmed) | Rolling releases, Robert's existing Proxmox work. No user-visible difference. | Debian fallback remains documented in README. | | Base OS | **Arch** (confirmed) | Rolling releases, Robert's existing Proxmox work. No user-visible difference. | Debian fallback remains documented in README. |
| Container runtime | **Docker + Compose** | Confirmed in README. Authentik and Caddy ship as Compose stacks. | Podman (cleaner rootless story, but Compose ecosystem is smaller and Authentik ships Docker-first). | | Container runtime | **Docker + Compose** | Confirmed in README. Authentik and Caddy ship as Compose stacks. | Podman (cleaner rootless story, but Compose ecosystem is smaller and Authentik ships Docker-first). |
@ -183,7 +183,7 @@ The wizard accumulates answers in the Flask `settings` dict at `webinstaller/app
## Open questions for Robert ## Open questions for Robert
1. **"Backend on/off" and "Backend address" fields** in your Screen 1 wireframe — what do these configure? Dropped from S1 above until this is clear. If it's "connect to Furtka cloud for managed gateway," that's already covered by the S5 "free subdomain" path. 1. **"Backend on/off" and "Backend address" fields** in your Screen 1 wireframe — what do these configure? Dropped from S1 above until this is clear. If it's "connect to Homebase cloud for managed gateway," that's already covered by the S5 "free subdomain" path.
2. **Local CA vs Tailscale-style ACME** — I locked in local CA because it's self-contained and works offline. Tailscale's approach (public cert for `*.ts.net`) is smoother but requires Tailscale. Your call. 2. **Local CA vs Tailscale-style ACME** — I locked in local CA because it's self-contained and works offline. Tailscale's approach (public cert for `*.ts.net`) is smoother but requires Tailscale. Your call.
3. **Wizard UI framework** — currently Jinja templates + plain HTML (see `webinstaller/templates/`). If you want HTMX or Alpine for the async parts (S6 cert wait, S7 diagnostics), decide now so the templates don't need rework later. 3. **Wizard UI framework** — currently Jinja templates + plain HTML (see `webinstaller/templates/`). If you want HTMX or Alpine for the async parts (S6 cert wait, S7 diagnostics), decide now so the templates don't need rework later.
4. **Language list** — which languages ship in v1? Defaulting to English + German + Polish would cover us; anything else up to you. 4. **Language list** — which languages ship in v1? Defaulting to English + German + Polish would cover us; anything else up to you.

View file

View file

@ -1,115 +0,0 @@
"""Shared primitives for release-tarball flows.
Both ``furtka.updater`` (core self-update) and ``furtka.catalog`` (apps
catalog sync) pull a tarball from a Forgejo Releases page, verify its
SHA256 against the ``.sha256`` sidecar, and extract it with a path-
traversal guard. The helpers here are the single implementation of
that dance.
Each error-raising helper accepts an ``error_cls`` kwarg so callers can
keep their domain-specific exception type (``UpdateError``,
``CatalogError``) at call sites — the helper itself defaults to a
neutral ``ReleaseError`` for use in tests or standalone scripts.
"""
from __future__ import annotations
import hashlib
import json
import shutil
import tarfile
import urllib.error
import urllib.request
from pathlib import Path
class ReleaseError(RuntimeError):
    """Neutral failure for release-tarball operations.

    Default for the ``error_cls`` kwarg accepted by the helpers in this
    module; callers with a domain-specific exception (``UpdateError``,
    ``CatalogError``) pass their own class instead.
    """
def forgejo_api(host: str, repo: str, path: str, *, error_cls: type = ReleaseError) -> dict | list:
    """GET a Forgejo repo API endpoint and return the decoded JSON body.

    Builds ``https://<host>/api/v1/repos/<repo><path>`` and requests it
    with a 15 s timeout.

    Raises:
        error_cls: on any network failure (URLError) or non-JSON response,
            with the offending URL in the message.
    """
    endpoint = f"https://{host}/api/v1/repos/{repo}{path}"
    request = urllib.request.Request(endpoint, headers={"Accept": "application/json"})
    try:
        with urllib.request.urlopen(request, timeout=15) as response:
            body = response.read()
        return json.loads(body)
    except (urllib.error.URLError, json.JSONDecodeError) as exc:
        raise error_cls(f"forgejo api {endpoint}: {exc}") from exc
def download(url: str, dest: Path, *, error_cls: type = ReleaseError) -> None:
    """Stream ``url`` into ``dest``, creating parent directories as needed.

    Uses a 60 s timeout and copies in chunks via ``shutil.copyfileobj``
    so large tarballs are never held fully in memory.

    Raises:
        error_cls: on any network failure (URLError), with the URL in
            the message.
    """
    dest.parent.mkdir(parents=True, exist_ok=True)
    request = urllib.request.Request(url)
    try:
        with urllib.request.urlopen(request, timeout=60) as source:
            with dest.open("wb") as sink:
                shutil.copyfileobj(source, sink)
    except urllib.error.URLError as exc:
        raise error_cls(f"download {url}: {exc}") from exc
def sha256_of(path: Path) -> str:
    """Return the hex SHA-256 digest of ``path``, read in 1 MiB chunks."""
    digest = hashlib.sha256()
    with path.open("rb") as fh:
        while True:
            chunk = fh.read(1024 * 1024)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def verify_tarball(tarball: Path, expected_sha: str, *, error_cls: type = ReleaseError) -> None:
    """Raise ``error_cls`` unless the tarball's SHA-256 equals ``expected_sha``.

    The mismatch message includes both the expected and the computed hash.
    """
    computed = sha256_of(tarball)
    if computed == expected_sha:
        return
    raise error_cls(f"sha256 mismatch: expected {expected_sha}, got {computed}")
def parse_sha256_sidecar(text: str, *, error_cls: type = ReleaseError) -> str:
    """Extract the hash field from a standard `sha256sum` sidecar.

    Only the first non-empty line is considered; the hash is its first
    whitespace-separated token (the filename column is ignored).

    Raises:
        error_cls: if the sidecar text is empty or whitespace-only.
    """
    first_line = text.strip().split("\n", 1)[0].strip()
    if not first_line:
        raise error_cls("empty sha256 sidecar")
    hash_field, *_rest = first_line.split()
    return hash_field
def extract_tarball(tarball: Path, dest: Path, *, error_cls: type = ReleaseError) -> str:
    """Extract a ``.tar.gz`` into ``dest`` and return the VERSION at its root.

    Every member name is checked before extraction: absolute paths and
    any ``..`` segment are refused so entries cannot escape ``dest``.
    On Python 3.12+ the stricter ``data`` extraction filter is applied
    as well, catching symlink-escape / device-node / setuid tricks the
    name check cannot see; older interpreters fall back to a plain
    ``extractall``.

    Raises:
        error_cls: on a suspicious member name, or when the extracted
            tree has no ``VERSION`` file at its root.
    """
    dest.mkdir(parents=True, exist_ok=True)
    with tarfile.open(tarball, "r:gz") as archive:
        for entry in archive.getmembers():
            escapes = entry.name.startswith(("/", "..")) or ".." in Path(entry.name).parts
            if escapes:
                raise error_cls(f"refusing tarball entry {entry.name!r}")
        try:
            archive.extractall(dest, filter="data")
        except TypeError:
            # Pre-3.12 tarfile does not accept the ``filter`` kwarg.
            archive.extractall(dest)
    marker = dest / "VERSION"
    if not marker.is_file():
        raise error_cls("tarball has no VERSION file at root")
    return marker.read_text().strip()
def version_tuple(v: str) -> tuple:
    """CalVer comparator: 26.1-alpha < 26.1-beta < 26.1-rc < 26.1 < 26.2-alpha.

    Pre-release stages sort before the corresponding stable (no-suffix)
    release; an unrecognized suffix sorts below every recognized stage
    of the same release, and a malformed version sorts below everything.
    Returns a tuple of (year, release, stage_rank, suffix).
    """
    head, _, suffix = v.partition("-")
    try:
        year_text, release_text = head.split(".", 1)
        year = int(year_text)
        release = int(release_text)
    except ValueError:
        # Not "<int>.<int>" — sentinel tuple that sorts below all valid versions.
        return (-1, -1, -1, v)
    if suffix == "":
        # Stable release: outranks every pre-release stage.
        return (year, release, 3, "")
    for stage, rank in (("alpha", 0), ("beta", 1), ("rc", 2)):
        if suffix.startswith(stage):
            return (year, release, rank, suffix)
    # Unknown suffix: below alpha, above malformed.
    return (year, release, -1, suffix)

File diff suppressed because it is too large Load diff

View file

@ -1,260 +0,0 @@
"""Login-guard primitives for the Furtka UI.
One admin, one password. Passwords are PBKDF2-SHA256 hashed via
``furtka.passwd`` (stdlib-only hashlib.pbkdf2_hmac / hashlib.scrypt),
stored in /var/lib/furtka/users.json with mode 0600. Sessions live in
memory — a systemctl restart logs everyone out again, which is fine
for an alpha single-user box. The ``LoginAttempts`` store in this
module rate-limits failed logins per (username, IP) and is also
in-memory; a restart clears a stuck lockout.
On upgrade from pre-auth Furtka the users.json file does not exist
yet; the api's GET /login detects this via ``setup_needed()`` and
renders a first-run form that POSTs to /login as if it were a setup
submit. Fresh installs get the file pre-populated by the webinstaller
so the setup step is skipped.
Hash format is compatible with werkzeug.security — 26.11 / 26.12 boxes
that happened to have werkzeug installed can carry their users.json
forward without re-setup; see ``furtka.passwd`` for the scrypt reader.
"""
from __future__ import annotations
import json
import math
import secrets
import threading
from dataclasses import dataclass
from datetime import UTC, datetime, timedelta
from furtka.passwd import hash_password as _hash_password
from furtka.passwd import verify_password as _verify_password
from furtka.paths import users_file
COOKIE_NAME = "furtka_session"
COOKIE_TTL_SECONDS = 7 * 24 * 3600 # one week
def hash_password(plain: str) -> str:
    """Hash ``plain`` with stdlib PBKDF2-SHA256 — 600k iterations (OWASP 2023).

    Thin passthrough to ``furtka.passwd.hash_password``.
    """
    return _hash_password(plain)
def verify_password(plain: str, hashed: str) -> bool:
    """Constant-time verify; accepts stdlib + legacy werkzeug hash formats.

    Thin passthrough to ``furtka.passwd.verify_password``.
    """
    return _verify_password(plain, hashed)
def load_users() -> dict:
    """Return the users dict, or {} when missing, unreadable, or malformed.

    A missing file is the expected state on first boot and on upgrades
    from pre-auth versions; callers treat an empty dict as "setup
    required".
    """
    try:
        raw = users_file().read_text()
    except OSError:
        # Missing file (first boot / pre-auth upgrade) or unreadable.
        return {}
    stripped = raw.strip()
    if not stripped:
        return {}
    try:
        parsed = json.loads(stripped)
    except json.JSONDecodeError:
        return {}
    return parsed if isinstance(parsed, dict) else {}
def save_users(users: dict) -> None:
    """Atomically write users.json with mode 0600.

    Same pattern as installer.write_env — write to .tmp, then rename —
    so a crash between open() and close() can't leave a partial file in
    place. The temp file is *created* with mode 0600 via os.open rather
    than chmod'ed after writing: the old write-then-chmod order left a
    short window where a default-umask file holding password hashes was
    world-readable.
    """
    import os  # local import keeps the module's import surface unchanged

    path = users_file()
    path.parent.mkdir(parents=True, exist_ok=True)
    tmp = path.with_suffix(path.suffix + ".tmp")
    fd = os.open(tmp, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, "w") as fh:
        fh.write(json.dumps(users, indent=2) + "\n")
    # If tmp pre-existed (crash leftover), O_CREAT's mode argument is
    # ignored — enforce 0600 explicitly before it becomes the live file.
    os.chmod(tmp, 0o600)
    tmp.replace(path)
def setup_needed() -> bool:
    """True when no admin account exists yet — initial setup is required."""
    # {} (missing/unreadable file) and a dict without "admin" both mean
    # "setup required"; a single membership test covers both.
    return "admin" not in load_users()
def create_admin(username: str, password: str) -> None:
    """Replace users.json with a single admin record.

    The webinstaller calls this post-install (with the step-1 password) so
    the installed system is login-guarded from first boot. The /login
    route calls it on first setup for upgrade-path boxes that don't
    already have a users.json.
    """
    record = {
        "username": username,
        "hash": hash_password(password),
        "created_at": datetime.now(UTC).isoformat(timespec="seconds"),
    }
    save_users({"admin": record})
def authenticate(username: str, password: str) -> bool:
    """Return True iff the supplied credentials match the admin record."""
    admin = load_users().get("admin")
    if not admin or admin.get("username") != username:
        return False
    stored = admin.get("hash")
    if not (isinstance(stored, str) and stored):
        return False
    return verify_password(password, stored)
@dataclass(frozen=True)
class Session:
    # Immutable session record; identity is the opaque bearer token.
    token: str  # opaque value stored in the cookie (token_urlsafe in create())
    username: str  # account the session was minted for
    expires_at: datetime  # absolute UTC expiry, checked on every lookup
class SessionStore:
    """Thread-safe in-memory session table.

    api.py uses the stdlib HTTPServer (single-threaded by default), but
    the lock is kept so swapping to ThreadingHTTPServer later doesn't
    require revisiting this class.
    """

    def __init__(self, ttl_seconds: int = COOKIE_TTL_SECONDS) -> None:
        self._ttl = timedelta(seconds=ttl_seconds)
        self._by_token: dict[str, Session] = {}
        self._lock = threading.Lock()

    def create(self, username: str) -> Session:
        """Mint a fresh session for ``username`` and register it."""
        fresh = Session(
            token=secrets.token_urlsafe(32),
            username=username,
            expires_at=datetime.now(UTC) + self._ttl,
        )
        with self._lock:
            self._by_token[fresh.token] = fresh
        return fresh

    def lookup(self, token: str | None) -> Session | None:
        """Return the live session for ``token``, or None.

        Expired entries are evicted on sight so repeat lookups stay fast.
        """
        if not token:
            return None
        with self._lock:
            found = self._by_token.get(token)
            if found is None:
                return None
            if datetime.now(UTC) >= found.expires_at:
                self._by_token.pop(token, None)
                return None
            return found

    def revoke(self, token: str | None) -> None:
        """Forget ``token``; a no-op for unknown or falsy tokens."""
        if token:
            with self._lock:
                self._by_token.pop(token, None)

    def clear(self) -> None:
        """Test helper — wipe all sessions."""
        with self._lock:
            self._by_token.clear()
class LoginAttempts:
"""In-memory rate-limiter for failed logins, keyed by (username, ip).
Parallels SessionStore: thread-safe, uses ``datetime.now(UTC)`` so the
same ``_FakeDatetime`` test shim works, lives only in memory so a
``systemctl restart furtka`` wipes a stuck lockout. Tuple keying means
a flood from one source IP can't lock the admin out from elsewhere
(different IP different key) the trade-off is that an attacker
can keep probing forever by rotating IPs, but they still eat the
PBKDF2 cost per attempt.
Stored data is a dict[key list[datetime]] of recent failure
timestamps. Every call prunes entries older than ``WINDOW_SECONDS``,
so memory per active key is bounded by ``MAX_FAILURES``.
"""
MAX_FAILURES = 10
WINDOW_SECONDS = 15 * 60
LOCKOUT_SECONDS = 15 * 60
def __init__(
self,
max_failures: int = MAX_FAILURES,
window_seconds: int = WINDOW_SECONDS,
lockout_seconds: int = LOCKOUT_SECONDS,
) -> None:
self._max = max_failures
self._window = timedelta(seconds=window_seconds)
self._lockout = timedelta(seconds=lockout_seconds)
self._fails: dict[tuple[str, str], list[datetime]] = {}
self._lock = threading.Lock()
def _prune_locked(self, key: tuple[str, str], now: datetime) -> list[datetime]:
"""Drop timestamps older than the window; caller holds self._lock."""
cutoff = now - self._window
kept = [ts for ts in self._fails.get(key, ()) if ts >= cutoff]
if kept:
self._fails[key] = kept
else:
self._fails.pop(key, None)
return kept
def register_failure(self, key: tuple[str, str]) -> None:
now = datetime.now(UTC)
with self._lock:
self._prune_locked(key, now)
self._fails.setdefault(key, []).append(now)
def is_locked(self, key: tuple[str, str]) -> bool:
return self.retry_after_seconds(key) > 0
def retry_after_seconds(self, key: tuple[str, str]) -> int:
"""Seconds remaining on an active lockout, or 0 if not locked."""
now = datetime.now(UTC)
with self._lock:
kept = self._prune_locked(key, now)
if len(kept) < self._max:
return 0
# Lockout runs from the oldest retained failure; once it
# falls off the window the key is effectively released.
unlock_at = kept[0] + self._lockout
remaining = (unlock_at - now).total_seconds()
if remaining <= 0:
return 0
return max(1, math.ceil(remaining))
def clear(self, key: tuple[str, str]) -> None:
with self._lock:
self._fails.pop(key, None)
def clear_all(self) -> None:
"""Test helper — wipe all failure state."""
with self._lock:
self._fails.clear()
# Module-level singletons used by the HTTP handler — one shared session
# table and one shared failed-login tracker per process.
SESSIONS = SessionStore()
LOCKOUT = LoginAttempts()

View file

@ -1,253 +0,0 @@
"""Furtka apps catalog sync.
Mirrors the shape of ``furtka.updater`` but targets a separate Forgejo
repo (``daniel/furtka-apps`` by default) whose releases carry a single
``furtka-apps-<ver>.tar.gz`` with ``VERSION`` at the root and an
``apps/<name>/`` tree underneath. Pulling the catalog keeps the on-box
app ecosystem fresh without requiring a Furtka core release — core
ships a seed ``apps/`` under ``/opt/furtka/current/apps/`` that the
resolver falls back to when the catalog is empty or stale.
Flow of ``sync_catalog()``:
1. flock on ``/run/furtka/catalog.lock`` so two triggers (timer + manual
UI click) can't race.
2. ``check_catalog()`` asks Forgejo for the latest release and picks out
the tarball + sidecar URLs.
3. Download tarball + sidecar to ``/var/lib/furtka/catalog/_downloads/``.
4. Verify the sha256 sidecar against the tarball.
5. Extract into ``/var/lib/furtka/catalog/_staging/``.
6. Validate every ``apps/<name>/manifest.json`` via ``furtka.manifest.
load_manifest``. A broken catalog release is refused here, not half-
applied.
7. Atomic rename: existing live catalog → ``catalog.prev/``, staging →
``catalog/``, then rmtree the prev. Any failure before this step
leaves the live catalog untouched.
8. Write ``/var/lib/furtka/catalog-state.json`` for the UI.
Paths can be overridden via env vars so tests can redirect everything to
a tmp dir.
"""
from __future__ import annotations
import fcntl
import json
import os
import shutil
import time
from dataclasses import dataclass
from pathlib import Path
from furtka import _release_common as _rc
from furtka.manifest import ManifestError, load_manifest
from furtka.paths import catalog_dir
# Forgejo coordinates — env-overridable so tests (and forks) can point the
# sync at a different host/repo.
FORGEJO_HOST = os.environ.get("FURTKA_FORGEJO_HOST", "forgejo.sourcegate.online")
CATALOG_REPO = os.environ.get("FURTKA_CATALOG_REPO", "daniel/furtka-apps")
# State + lock file paths — also env-overridable for tmp-dir test setups.
_CATALOG_STATE = Path(os.environ.get("FURTKA_CATALOG_STATE", "/var/lib/furtka/catalog-state.json"))
_LOCK_PATH = Path(os.environ.get("FURTKA_CATALOG_LOCK", "/run/furtka/catalog.lock"))
# Sibling-directory names derived from catalog_dir() during a sync.
_STAGING_NAME = "_staging"
_DOWNLOADS_NAME = "_downloads"
_PREV_SUFFIX = ".prev"
_VERSION_FILE = "VERSION"
class CatalogError(RuntimeError):
    """Any failure in the catalog sync flow that should surface to the caller."""


@dataclass(frozen=True)
class CatalogCheck:
    """Immutable result of one ``check_catalog()`` call."""

    current: str | None  # installed catalog VERSION, or None when absent
    latest: str  # tag_name of the newest Forgejo release
    update_available: bool  # latest is strictly newer than current
    tarball_url: str | None  # download URL of the .tar.gz asset, if present
    sha256_url: str | None  # download URL of the .sha256 sidecar, if present
def state_path() -> Path:
    """Path of the UI-facing sync-state JSON (env-resolved at import time)."""
    return _CATALOG_STATE
def lock_path() -> Path:
    """Path of the flock file that serialises concurrent catalog syncs."""
    return _LOCK_PATH
def read_current_catalog_version() -> str | None:
    """Return the string in <catalog_dir>/VERSION, or None if absent / unreadable.

    An empty or whitespace-only VERSION also yields None so callers can
    treat "no usable version" uniformly.
    """
    try:
        value = (catalog_dir() / _VERSION_FILE).read_text().strip()
    except OSError:
        # FileNotFoundError / NotADirectoryError are OSError subclasses —
        # the old three-way tuple was redundant; one catch covers them all.
        return None
    return value or None
def check_catalog() -> CatalogCheck:
    """Query Forgejo for the latest catalog release.

    Uses ``/releases?limit=1`` (not ``/releases/latest``) for the same
    reason the core updater does — Forgejo's ``latest`` endpoint skips
    pre-releases and 404s when every tag carries a suffix.
    """
    current = read_current_catalog_version()
    releases = _rc.forgejo_api(
        FORGEJO_HOST, CATALOG_REPO, "/releases?limit=1", error_cls=CatalogError
    )
    if not (isinstance(releases, list) and releases):
        raise CatalogError("no catalog releases published yet")
    newest = releases[0]
    latest = str(newest.get("tag_name") or "").strip()
    if not latest:
        raise CatalogError("latest catalog release has empty tag_name")

    # Pick the tarball + sidecar out of the release assets. The suffixes
    # are mutually exclusive (".tar.gz.sha256" doesn't end ".tar.gz"), so
    # branch order doesn't matter; last matching asset of each kind wins.
    tarball_url: str | None = None
    sha256_url: str | None = None
    for asset in newest.get("assets") or []:
        asset_name = asset.get("name") or ""
        asset_url = asset.get("browser_download_url") or ""
        if asset_name.endswith(".tar.gz.sha256"):
            sha256_url = asset_url
        elif asset_name.endswith(".tar.gz") and "furtka-apps-" in asset_name:
            tarball_url = asset_url

    newer = latest != current and (
        current is None or _rc.version_tuple(latest) > _rc.version_tuple(current)
    )
    return CatalogCheck(
        current=current,
        latest=latest,
        update_available=newer,
        tarball_url=tarball_url,
        sha256_url=sha256_url,
    )
def write_state(stage: str, **extra) -> None:
    """Atomic JSON state write — same shape as updater's update-state.json."""
    target = state_path()
    target.parent.mkdir(parents=True, exist_ok=True)
    payload = {
        "stage": stage,
        "updated_at": time.strftime("%Y-%m-%dT%H:%M:%S%z"),
        **extra,
    }
    scratch = target.with_suffix(".tmp")
    scratch.write_text(json.dumps(payload, indent=2))
    scratch.replace(target)
def read_state() -> dict:
    """Return the last-written sync state, or {} if missing / unparseable."""
    try:
        raw = state_path().read_text()
    except FileNotFoundError:
        return {}
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        return {}
def acquire_lock():
    """Take the exclusive, non-blocking catalog lock.

    Returns the open file handle — keep it alive for the duration of the
    sync; closing it releases the flock. Raises CatalogError when another
    sync already holds the lock.
    """
    target = lock_path()
    target.parent.mkdir(parents=True, exist_ok=True)
    handle = target.open("w")
    try:
        fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except BlockingIOError as exc:
        handle.close()
        raise CatalogError("another catalog sync is already in progress") from exc
    return handle
def _validate_staging(staging: Path, expected_version: str) -> None:
    """Fail hard if the staging tree isn't a well-formed catalog release.

    Checks, in order: VERSION exists and matches ``expected_version``,
    apps/ exists, and every app directory carries a manifest.json that
    ``load_manifest`` accepts. A broken release is refused here, never
    half-applied.
    """
    version_path = staging / _VERSION_FILE
    if not version_path.is_file():
        raise CatalogError("catalog tarball has no VERSION file at root")
    found = version_path.read_text().strip()
    if found != expected_version:
        raise CatalogError(
            f"catalog tarball VERSION ({found!r}) doesn't match expected ({expected_version!r})"
        )
    apps_root = staging / "apps"
    if not apps_root.is_dir():
        raise CatalogError("catalog tarball has no apps/ directory")
    for app_dir in sorted(apps_root.iterdir()):
        if not app_dir.is_dir():
            # Stray files at apps/ top level are tolerated, not validated.
            continue
        manifest_path = app_dir / "manifest.json"
        if not manifest_path.exists():
            raise CatalogError(f"catalog app {app_dir.name!r} has no manifest.json")
        try:
            load_manifest(manifest_path, expected_name=app_dir.name)
        except ManifestError as exc:
            raise CatalogError(f"catalog app {app_dir.name!r}: invalid manifest: {exc}") from exc
def _atomic_swap(staging: Path) -> None:
    """Move staging → live catalog, keeping the previous tree as .prev until
    the rename succeeds so we never leave a half-written catalog on disk."""
    live = catalog_dir()
    live.parent.mkdir(parents=True, exist_ok=True)
    prev = live.with_name(live.name + _PREV_SUFFIX)
    # A stale .prev left by an earlier crashed swap would make
    # live.rename(prev) fail — clear it first.
    if prev.exists():
        shutil.rmtree(prev)
    if live.exists():
        live.rename(prev)
    try:
        staging.rename(live)
    except OSError as e:
        if prev.exists():
            # try to restore the previous tree; if that also fails the box
            # has no catalog at all until the next sync — still better than
            # a partially-extracted tree.
            try:
                prev.rename(live)
            except OSError:
                pass
        raise CatalogError(f"atomic catalog swap failed: {e}") from e
    # Swap succeeded — the old tree is disposable; best-effort cleanup.
    if prev.exists():
        shutil.rmtree(prev, ignore_errors=True)
def sync_catalog() -> CatalogCheck:
    """End-to-end sync. Acquires the lock, writes state at each stage, and
    leaves the live catalog untouched on any failure before the rename step.

    Returns the CatalogCheck describing what was (or wasn't) applied.
    Raises CatalogError on any step failure.
    """
    # The lock handle is a context manager; leaving the block closes it,
    # which releases the flock taken in acquire_lock().
    with acquire_lock():
        write_state("checking")
        check = check_catalog()
        if not check.update_available:
            write_state("done", version=check.current or check.latest, note="already up to date")
            return check
        if not check.tarball_url or not check.sha256_url:
            raise CatalogError("catalog release is missing tarball or sha256 asset")
        # Downloads land in a sibling of the live catalog so half-finished
        # artefacts never pollute the live tree, and stay under /var/lib/
        # furtka/ so a sync interrupted by reboot can resume instead of
        # starting over from /tmp (which clears).
        dl_dir = catalog_dir().with_name(catalog_dir().name + _DOWNLOADS_NAME)
        dl_dir.mkdir(parents=True, exist_ok=True)
        tarball = dl_dir / f"furtka-apps-{check.latest}.tar.gz"
        sha_file = dl_dir / f"furtka-apps-{check.latest}.tar.gz.sha256"
        write_state("downloading", latest=check.latest)
        _rc.download(check.tarball_url, tarball, error_cls=CatalogError)
        _rc.download(check.sha256_url, sha_file, error_cls=CatalogError)
        # Verify BEFORE extracting: a corrupted tarball never touches disk
        # beyond the downloads dir.
        write_state("verifying", latest=check.latest)
        expected = _rc.parse_sha256_sidecar(sha_file.read_text(), error_cls=CatalogError)
        _rc.verify_tarball(tarball, expected, error_cls=CatalogError)
        write_state("extracting", latest=check.latest)
        # Stale staging from a previous failed sync must go before extract.
        staging = catalog_dir().with_name(catalog_dir().name + _STAGING_NAME)
        if staging.exists():
            shutil.rmtree(staging)
        try:
            _rc.extract_tarball(tarball, staging, error_cls=CatalogError)
            _validate_staging(staging, check.latest)
        except CatalogError:
            # Extraction/validation failure: drop staging so the next sync
            # starts clean; the live catalog is still untouched.
            shutil.rmtree(staging, ignore_errors=True)
            raise
        write_state("swapping", latest=check.latest)
        try:
            _atomic_swap(staging)
        except CatalogError:
            shutil.rmtree(staging, ignore_errors=True)
            raise
        write_state("done", version=check.latest, previous=check.current)
        return check

View file

@ -1,348 +0,0 @@
import argparse
import json
import sys
from furtka import dockerops, installer, reconciler
from furtka.paths import apps_dir
from furtka.scanner import scan
def _cmd_app_list(args: argparse.Namespace) -> int:
    """List installed apps — human table by default, full dump with --json."""
    results = scan(apps_dir())
    if args.json:
        # Serialise every scan result, including broken ones (manifest=None
        # with `error` set), so callers see the whole picture.
        out = [
            {
                "path": str(r.path),
                "name": r.manifest.name if r.manifest else None,
                "ok": r.ok,
                "error": r.error,
                "manifest": {
                    "name": r.manifest.name,
                    "display_name": r.manifest.display_name,
                    "version": r.manifest.version,
                    "description": r.manifest.description,
                    "description_long": r.manifest.description_long,
                    "volumes": list(r.manifest.volumes),
                    "ports": list(r.manifest.ports),
                    "icon": r.manifest.icon,
                    "open_url": r.manifest.open_url,
                    "settings": [
                        {
                            "name": s.name,
                            "label": s.label,
                            "description": s.description,
                            "type": s.type,
                            "required": s.required,
                            "default": s.default,
                        }
                        for s in r.manifest.settings
                    ],
                }
                if r.manifest
                else None,
            }
            for r in results
        ]
        print(json.dumps(out, indent=2))
        return 0
    if not results:
        print("(no apps installed)")
        return 0
    # Human table: fixed-width name/version columns, one row per app.
    for r in results:
        if r.ok:
            m = r.manifest
            print(f"{m.name:20s} {m.version:10s} {m.display_name}")
        else:
            print(f"{r.path.name:20s} ERROR {r.error}")
    return 0
def _cmd_app_install(args: argparse.Namespace) -> int:
    """Install an app from a path or bundled name, then reconcile.

    Returns 2 on install failure, 1 when reconcile reported per-app
    errors, 0 otherwise.
    """
    try:
        source = installer.resolve_source(args.source)
        installed_dir = installer.install_from(source)
    except installer.InstallError as exc:
        print(f"error: {exc}", file=sys.stderr)
        return 2
    print(f"installed {installed_dir.name} to {installed_dir}")
    actions = reconciler.reconcile(apps_dir())
    for action in actions:
        print(f" {action.describe()}")
    if reconciler.has_errors(actions):
        return 1
    return 0
def _cmd_app_install_bg(args: argparse.Namespace) -> int:
    """Docker-facing phases of an install — called by the API via systemd-run.

    Internal subcommand; normal CLI users want `app install` (synchronous).
    Separates the slow docker pull/up from the synchronous validation the
    API does inline, so the UI can poll a state file.
    """
    from furtka import install_runner

    try:
        install_runner.run_install(args.name)
    except Exception as exc:
        # run_install already wrote state="error"; echo for journald.
        print(f"install-bg failed: {exc}", file=sys.stderr)
        return 1
    return 0
def _cmd_app_remove(args: argparse.Namespace) -> int:
    """Stop an app's containers and delete its folder; volumes survive."""
    app_dir = apps_dir() / args.name
    if not app_dir.exists():
        print(f"error: {args.name!r} is not installed", file=sys.stderr)
        return 1
    try:
        dockerops.compose_down(app_dir, args.name)
    except dockerops.DockerError as exc:
        # Container may already be down (or never came up). Don't block removal.
        print(f"warning: compose down failed, removing folder anyway: {exc}", file=sys.stderr)
    try:
        installer.remove(args.name)
    except installer.InstallError as exc:
        print(f"error: {exc}", file=sys.stderr)
        return 1
    print(f"removed {args.name} (volumes preserved)")
    return 0
def _cmd_serve(args: argparse.Namespace) -> int:
    """Run the resource-manager HTTP API + UI; blocks until the server exits."""
    # Imported lazily so `furtka` startup stays cheap when the user only runs
    # `app list` or `reconcile` (the common case during tests + scripts).
    from furtka import api
    api.serve(args.host, args.port)
    return 0
def _cmd_reconcile(args: argparse.Namespace) -> int:
    """Bring docker state in line with the apps dir, printing every action."""
    actions = reconciler.reconcile(apps_dir(), dry_run=args.dry_run)
    print(f"Scanned {apps_dir()}: {len(actions)} actions")
    for action in actions:
        print(f" {action.describe()}")
    if args.dry_run:
        print("(dry-run — nothing changed)")
    # Exit non-zero on any per-app failure so systemd marks furtka-reconcile
    # red — but only AFTER all apps were attempted, so a broken app doesn't
    # hide healthy ones.
    if reconciler.has_errors(actions):
        return 1
    return 0
def _cmd_update(args: argparse.Namespace) -> int:
    """Check for (--check) or apply a Furtka core release.

    Returns 0 on success / already-up-to-date, 2 on any UpdateError.
    """
    from furtka import updater

    if args.check:
        try:
            check = updater.check_update()
        except updater.UpdateError as e:
            print(f"error: {e}", file=sys.stderr)
            return 2
        if args.json:
            print(
                json.dumps(
                    {
                        "current": check.current,
                        "latest": check.latest,
                        "update_available": check.update_available,
                    },
                    indent=2,
                )
            )
        elif check.update_available:
            # Separator restored: without it the two versions printed as
            # one mashed-together string (e.g. "26.126.2").
            print(f"Update available: {check.current} → {check.latest}")
        else:
            print(f"Already up to date ({check.current})")
        return 0
    try:
        check = updater.run_update()
    except updater.UpdateError as e:
        print(f"error: {e}", file=sys.stderr)
        return 2
    if not check.update_available:
        print(f"Already up to date ({check.current})")
    else:
        print(f"Updated {check.current} → {check.latest}")
    return 0
def _cmd_rollback(args: argparse.Namespace) -> int:
    """Flip /opt/furtka/current back to the previous version slot."""
    from furtka import updater

    try:
        restored = updater.rollback()
    except updater.UpdateError as exc:
        print(f"error: {exc}", file=sys.stderr)
        return 2
    print(f"Rolled back to {restored}")
    return 0
def _cmd_catalog_sync(args: argparse.Namespace) -> int:
    """Check for (--check) or apply the latest apps-catalog release.

    Returns 0 on success / already-up-to-date, 2 on any CatalogError.
    """
    from furtka import catalog

    if args.check:
        try:
            check = catalog.check_catalog()
        except catalog.CatalogError as e:
            print(f"error: {e}", file=sys.stderr)
            return 2
        if args.json:
            print(
                json.dumps(
                    {
                        "current": check.current,
                        "latest": check.latest,
                        "update_available": check.update_available,
                    },
                    indent=2,
                )
            )
        elif check.update_available:
            # Separator restored — the versions otherwise print run
            # together ("26.126.2"), matching the fix in _cmd_update.
            print(f"Catalog update available: {check.current or '(none)'} → {check.latest}")
        else:
            print(f"Catalog already up to date ({check.current or check.latest})")
        return 0
    try:
        check = catalog.sync_catalog()
    except catalog.CatalogError as e:
        print(f"error: {e}", file=sys.stderr)
        return 2
    if not check.update_available:
        print(f"Catalog already up to date ({check.current or check.latest})")
    else:
        print(f"Synced catalog {check.current or '(none)'} → {check.latest}")
    return 0
def _cmd_catalog_status(args: argparse.Namespace) -> int:
    """Print the installed catalog version and the last sync's stage."""
    from furtka import catalog

    version = catalog.read_current_catalog_version()
    state = catalog.read_state()
    if args.json:
        print(json.dumps({"current": version, "state": state}, indent=2))
        return 0
    print(f"Catalog version: {version or '(none — run `furtka catalog sync`)'}")
    if state:
        print(f"Last sync stage: {state.get('stage', '?')} at {state.get('updated_at', '?')}")
    else:
        print("Last sync stage: (never)")
    return 0
def build_parser() -> argparse.ArgumentParser:
    """Construct the full `furtka` CLI; every subcommand sets a `func` default
    that main() dispatches to."""
    p = argparse.ArgumentParser(prog="furtka", description="Furtka resource manager")
    sub = p.add_subparsers(dest="command", required=True)
    # --- app: list / install / install-bg / remove ---
    app = sub.add_parser("app", help="Manage installed apps")
    app_sub = app.add_subparsers(dest="subcommand", required=True)
    app_list = app_sub.add_parser("list", help="List installed apps")
    app_list.add_argument("--json", action="store_true", help="Emit JSON instead of a table")
    app_list.set_defaults(func=_cmd_app_list)
    app_install = app_sub.add_parser(
        "install",
        help="Install an app from a local folder or a bundled-app name",
    )
    app_install.add_argument(
        "source",
        help="Path to an app folder, or the name of a bundled app under /opt/furtka/apps/",
    )
    app_install.set_defaults(func=_cmd_app_install)
    # Internal — called by the HTTP API via systemd-run. Deliberately omitted
    # from the help listing; regular CLI users want `app install` above.
    app_install_bg = app_sub.add_parser(
        "install-bg",
        help=argparse.SUPPRESS,
    )
    app_install_bg.add_argument("name", help="Installed app folder name")
    app_install_bg.set_defaults(func=_cmd_app_install_bg)
    app_remove = app_sub.add_parser("remove", help="Stop and uninstall an app (keeps volumes)")
    app_remove.add_argument("name", help="App name (folder name under /var/lib/furtka/apps/)")
    app_remove.set_defaults(func=_cmd_app_remove)
    # --- reconcile ---
    reconcile = sub.add_parser(
        "reconcile",
        help="Bring docker state in line with /var/lib/furtka/apps",
    )
    reconcile.add_argument(
        "--dry-run",
        action="store_true",
        help="Show what would be done without changing anything",
    )
    reconcile.set_defaults(func=_cmd_reconcile)
    # --- serve (HTTP API + UI) ---
    serve = sub.add_parser("serve", help="Run the resource-manager HTTP API + UI")
    serve.add_argument("--host", default="127.0.0.1", help="Bind address (default 127.0.0.1)")
    serve.add_argument("--port", type=int, default=7000, help="Bind port (default 7000)")
    serve.set_defaults(func=_cmd_serve)
    # --- update / rollback (core self-update) ---
    update = sub.add_parser(
        "update",
        help="Check for or apply a Furtka release (Phase 2 self-update)",
    )
    update.add_argument(
        "--check",
        action="store_true",
        help="Only check whether an update is available; don't apply",
    )
    update.add_argument(
        "--json",
        action="store_true",
        help="Emit machine-readable JSON (only honoured with --check)",
    )
    update.set_defaults(func=_cmd_update)
    rollback = sub.add_parser(
        "rollback",
        help="Flip /opt/furtka/current back to the previous version slot",
    )
    rollback.set_defaults(func=_cmd_rollback)
    # --- catalog: sync / status ---
    catalog = sub.add_parser("catalog", help="Manage the apps catalog (daniel/furtka-apps)")
    catalog_sub = catalog.add_subparsers(dest="subcommand", required=True)
    catalog_sync = catalog_sub.add_parser(
        "sync",
        help="Download and install the latest apps catalog from Forgejo",
    )
    catalog_sync.add_argument(
        "--check",
        action="store_true",
        help="Only check whether a catalog update is available; don't apply",
    )
    catalog_sync.add_argument(
        "--json",
        action="store_true",
        help="Emit machine-readable JSON (only honoured with --check)",
    )
    catalog_sync.set_defaults(func=_cmd_catalog_sync)
    catalog_status = catalog_sub.add_parser(
        "status",
        help="Print the currently-installed catalog version and last-sync stage",
    )
    catalog_status.add_argument(
        "--json",
        action="store_true",
        help="Emit machine-readable JSON",
    )
    catalog_status.set_defaults(func=_cmd_catalog_status)
    return p
def main(argv: list[str] | None = None) -> int:
    """CLI entry point — parse args and dispatch to the chosen handler."""
    parsed = build_parser().parse_args(argv)
    return parsed.func(parsed)


if __name__ == "__main__":
    sys.exit(main())

View file

@ -1,119 +0,0 @@
import json
import subprocess
from pathlib import Path
class DockerError(RuntimeError):
    """Raised when a docker / docker-compose invocation exits non-zero."""

    pass
def _run(args: list[str], cwd: Path | None = None, check: bool = True):
    """Run a subprocess, capturing text stdout/stderr.

    With ``check`` (the default) a non-zero exit raises DockerError
    carrying stderr — or stdout when stderr is empty. With check=False
    the CompletedProcess is returned for the caller to inspect.
    """
    completed = subprocess.run(
        args,
        cwd=cwd,
        check=False,
        capture_output=True,
        text=True,
    )
    if check and completed.returncode != 0:
        detail = completed.stderr.strip() or completed.stdout.strip()
        raise DockerError(f"{' '.join(args)} exited {completed.returncode}: {detail}")
    return completed
def volume_exists(name: str) -> bool:
    """True when `docker volume inspect` succeeds for ``name``."""
    result = _run(["docker", "volume", "inspect", name], check=False)
    return result.returncode == 0
def ensure_volume(name: str) -> bool:
    """Create the named docker volume if it doesn't already exist.

    Returns True if the volume was just created, False if it already existed.
    """
    if not volume_exists(name):
        _run(["docker", "volume", "create", name])
        return True
    return False
def _compose_args(app_dir: Path, project: str) -> list[str]:
return [
"docker",
"compose",
"--project-name",
project,
"--file",
str(app_dir / "docker-compose.yaml"),
]
def compose_up(app_dir: Path, project: str) -> None:
    """`docker compose up --detach` for the app; raises DockerError via _run."""
    _run([*_compose_args(app_dir, project), "up", "--detach"], cwd=app_dir)
def compose_down(app_dir: Path, project: str) -> None:
    """`docker compose down` for the app; raises DockerError via _run."""
    _run([*_compose_args(app_dir, project), "down"], cwd=app_dir)
def compose_pull(app_dir: Path, project: str) -> None:
    """Fetch the latest image for every service in the compose file.

    No-op for images already up to date. Network-bound — can take seconds.
    Raises DockerError (via _run) on a non-zero compose exit.
    """
    _run([*_compose_args(app_dir, project), "pull"], cwd=app_dir)
def compose_image_tags(app_dir: Path, project: str) -> dict[str, str]:
    """Return {service_name: image_tag} as declared in the compose file.

    Uses `docker compose config --format json` so we don't have to write a
    YAML parser — compose already resolves env vars and defaults for us.
    Services without an `image` key are omitted from the result.
    """
    rendered = _run(
        [*_compose_args(app_dir, project), "config", "--format", "json"],
        cwd=app_dir,
    )
    try:
        config = json.loads(rendered.stdout)
    except json.JSONDecodeError as exc:
        raise DockerError(f"compose config: invalid JSON: {exc}") from exc
    tags: dict[str, str] = {}
    for service, spec in (config.get("services") or {}).items():
        image = spec.get("image")
        if image:
            tags[service] = image
    return tags
def local_image_id(tag: str) -> str | None:
    """`sha256:…` image ID of the locally-stored image matching `tag`.

    Returns None if the tag isn't pulled locally (normal before first pull).
    """
    inspected = _run(
        ["docker", "image", "inspect", tag, "--format", "{{.Id}}"],
        check=False,
    )
    if inspected.returncode != 0:
        return None
    image_id = inspected.stdout.strip()
    return image_id if image_id else None
def running_container_image_id(app_dir: Path, project: str, service: str) -> str | None:
    """`sha256:…` image ID the compose container for `service` was started from.

    Returns None if the service isn't running. Compose may have multiple
    replicas (scale > 1); in that case the first container's image ID wins,
    which is fine for "is this service on the current image?" checks since
    compose keeps replicas on the same image.
    """
    listed = _run(
        [*_compose_args(app_dir, project), "ps", "--quiet", service],
        cwd=app_dir,
        check=False,
    )
    if listed.returncode != 0:
        return None
    container_ids = [cid for cid in listed.stdout.splitlines() if cid.strip()]
    if not container_ids:
        return None
    detail = _run(
        ["docker", "inspect", "--format", "{{.Image}}", container_ids[0]],
        check=False,
    )
    if detail.returncode != 0:
        return None
    return detail.stdout.strip() or None

View file

@ -1,183 +0,0 @@
"""Local-CA HTTPS helpers for the `tls internal` setup.
Caddy generates the local root CA lazily on first start and keeps it under
$XDG_DATA_HOME/caddy/pki/authorities/local/ our packaged caddy.service
sets `XDG_DATA_HOME=/var/lib`, so on the target that resolves to
/var/lib/caddy/pki/authorities/local/. The private key stays 0600 /
caddy-owned; we only ever read the public root.crt next to it.
HTTPS is **opt-in** since 26.15-alpha. Default Caddyfile has no `:443`
site block, so `tls internal` never triggers cert issuance. The
/settings toggle drops a snippet file into /etc/caddy/furtka-https.d/
that adds the hostname+tls-internal block (plus the redirect snippet
inside /etc/caddy/furtka.d/ for HTTP→HTTPS). Disabling the toggle
removes both snippets and reloads — Caddy falls back to HTTP-only.
Why opt-in: fresh-install boxes used to always serve a self-signed
cert on :443. Any browser that had ever trusted a previous Furtka
box's local CA rejected the new cert with an unbypassable
SEC_ERROR_BAD_SIGNATURE Firefox in particular has no "Advanced →
Accept" for that case. Making HTTPS explicit means fresh installs
never hit that trap; users who want HTTPS download the rootCA.crt
first and then click the toggle.
This module exposes:
- status(): CA fingerprint + current toggle state
- set_force_https(enabled): write/remove BOTH snippets atomically,
reload Caddy, roll back on failure.
"""
import base64
import hashlib
import re
import subprocess
from pathlib import Path
# Public root cert of Caddy's local CA; the private key next to it stays
# 0600 / caddy-owned and is never read here (see module docstring).
CA_CERT_PATH = Path("/var/lib/caddy/pki/authorities/local/root.crt")
# Redirect snippet — imported inside the :80 site block per the docstring.
SNIPPET_DIR = Path("/etc/caddy/furtka.d")
REDIRECT_SNIPPET = SNIPPET_DIR / "redirect.caddyfile"
REDIRECT_CONTENT = "redir https://{host}{uri} permanent\n"
# HTTPS listener snippet — imported at Caddyfile top level (see
# _https_snippet_content for why it can't live inside a site block).
HTTPS_SNIPPET_DIR = Path("/etc/caddy/furtka-https.d")
HTTPS_SNIPPET = HTTPS_SNIPPET_DIR / "https.caddyfile"
HOSTNAME_FILE = Path("/etc/hostname")
# Captures the base64 body between the PEM certificate markers.
_PEM_RE = re.compile(
    r"-----BEGIN CERTIFICATE-----\s*(.+?)\s*-----END CERTIFICATE-----",
    re.DOTALL,
)


class HttpsError(Exception):
    """Recoverable failure from set_force_https — the caller should 5xx."""
def _read_hostname(hostname_file: Path = HOSTNAME_FILE) -> str:
    """Return the box's hostname, stripped.

    Falls back to 'furtka' when the file is missing, unreadable, or blank,
    so a broken /etc/hostname can't produce an empty site block that Caddy
    would reject at parse time.
    """
    try:
        value = hostname_file.read_text().strip()
    except OSError:
        # FileNotFoundError / PermissionError are OSError subclasses — the
        # old three-way tuple was redundant; one catch covers them all.
        return "furtka"
    return value or "furtka"
def _https_snippet_content(hostname: str) -> str:
"""Caddy site block the HTTPS toggle installs at opt-in.
Serves <hostname>.local and <hostname> on :443 with Caddy's
`tls internal` (local CA auto-issuance), and imports the shared
furtka_routes snippet so the :443 listener exposes the same
routes as :80. Must be written at top-level (not inside another
site block) that's why the Caddyfile imports furtka-https.d at
top-level rather than inside :80.
"""
return f"{hostname}.local, {hostname} {{\n\ttls internal\n\timport furtka_routes\n}}\n"
def _ca_fingerprint(ca_path: Path) -> str | None:
    """SHA-256 of the CA cert's DER bytes as uppercase hex, or None.

    None when the cert file is missing/unreadable, contains no PEM
    certificate block, or its base64 body doesn't decode.
    """
    try:
        pem = ca_path.read_text()
    except (FileNotFoundError, PermissionError, IsADirectoryError):
        return None
    match = _PEM_RE.search(pem)
    if not match:
        return None
    try:
        der = base64.b64decode("".join(match.group(1).split()))
    except ValueError:
        # binascii.Error (what b64decode raises on bad padding) subclasses
        # ValueError, so the old `base64.binascii.Error` entry — which also
        # leaned on base64's internal binascii import — was redundant.
        return None
    return hashlib.sha256(der).hexdigest().upper()
def _format_fingerprint(hex_upper: str) -> str:
return ":".join(hex_upper[i : i + 2] for i in range(0, len(hex_upper), 2))
def status(
    ca_path: Path = CA_CERT_PATH,
    https_snippet: Path = HTTPS_SNIPPET,
) -> dict:
    """HTTPS status payload for the UI/API.

    force_https is True iff the HTTPS listener snippet exists.
    Before 26.15-alpha this checked the redirect snippet instead, but
    the redirect alone — without a :443 listener — wouldn't actually
    serve HTTPS, so the listener snippet is the authoritative "HTTPS is
    on" signal.

    Keys: ca_available (CA cert readable/parseable), fingerprint_sha256
    (colon-separated SHA-256 of the CA cert, or None), force_https,
    ca_download_url (fixed path for fetching the root cert).
    """
    fp = _ca_fingerprint(ca_path)
    return {
        "ca_available": fp is not None,
        "fingerprint_sha256": _format_fingerprint(fp) if fp else None,
        "force_https": https_snippet.is_file(),
        "ca_download_url": "/rootCA.crt",
    }
def _default_reload() -> None:
    """Graceful `systemctl reload caddy`.

    check=True raises CalledProcessError on a non-zero exit; output is
    captured as text so set_force_https can put stderr into the
    HttpsError message.
    """
    subprocess.run(
        ["systemctl", "reload", "caddy"],
        check=True,
        capture_output=True,
        text=True,
    )
def set_force_https(
    enabled: bool,
    snippet_dir: Path = SNIPPET_DIR,
    snippet: Path = REDIRECT_SNIPPET,
    https_snippet_dir: Path = HTTPS_SNIPPET_DIR,
    https_snippet: Path = HTTPS_SNIPPET,
    hostname_file: Path = HOSTNAME_FILE,
    reload_caddy=_default_reload,
) -> bool:
    """Toggle HTTPS by writing or removing two snippets, then reloading:

    1. The top-level HTTPS hostname + tls-internal block (enables the
       :443 listener + Caddy's `tls internal` cert issuance).
    2. The :80-scoped redirect snippet (forces HTTP → HTTPS).

    On reload failure (or missing systemctl) both snippets are reverted
    to their pre-call state so a bad config can't leave Caddy wedged,
    then HttpsError is raised. Paths and the reload callable are
    injectable for tests. Returns `enabled` on success.
    """
    snippet_dir.mkdir(mode=0o755, parents=True, exist_ok=True)
    https_snippet_dir.mkdir(mode=0o755, parents=True, exist_ok=True)
    # Capture pre-call state so a failed reload can be rolled back.
    had_redirect = snippet.is_file()
    previous_redirect = snippet.read_text() if had_redirect else None
    had_https = https_snippet.is_file()
    previous_https = https_snippet.read_text() if had_https else None
    if enabled:
        snippet.write_text(REDIRECT_CONTENT)
        https_snippet.write_text(_https_snippet_content(_read_hostname(hostname_file)))
    else:
        if had_redirect:
            snippet.unlink()
        if had_https:
            https_snippet.unlink()
    try:
        reload_caddy()
    except subprocess.CalledProcessError as e:
        _revert(snippet, previous_redirect)
        _revert(https_snippet, previous_https)
        msg = (e.stderr or e.stdout or "").strip() or f"exit {e.returncode}"
        raise HttpsError(f"caddy reload failed: {msg}") from e
    except FileNotFoundError as e:
        # The default reload shells out to systemctl; if the binary is
        # absent we are not on the target system — revert and surface.
        _revert(snippet, previous_redirect)
        _revert(https_snippet, previous_https)
        raise HttpsError(f"systemctl not available: {e}") from e
    return enabled
def _revert(snippet: Path, previous: str | None) -> None:
if previous is None:
try:
snippet.unlink()
except FileNotFoundError:
pass
else:
snippet.write_text(previous)

View file

@ -1,121 +0,0 @@
"""Background job for app installs — progress-visible via state file.
The slow part of installing an app is `docker compose pull` on a large
image (Jellyfin ~500 MB); without progress feedback, the UI modal sits
dead on "Installing…" for 30+ seconds and the user wonders if it hung.
This module mirrors the exact same shape as ``furtka.catalog`` and
``furtka.updater`` so the UI can poll an install just like it polls a
catalog sync or a self-update. The split is:
- ``furtka.api._do_install`` runs synchronously: resolve source, copy
the app folder, write .env, validate path settings + placeholders.
Those are fast, and their failures deserve an immediate 4xx so the
install modal can surface them in-line.
- After that the API writes an initial state file (stage
"pulling_image") and dispatches ``systemd-run --unit=furtka-install-
<name>`` to run ``furtka app install-bg <name>`` in the background.
That CLI subcommand is what calls ``run_install()`` here it does the
docker-facing phases and writes state transitions as it goes.
State file schema (``/var/lib/furtka/install-state.json``):
{
"stage": "pulling_image" | "creating_volumes"
| "starting_container" | "done" | "error",
"updated_at": "2026-04-21T17:30:45+0200",
"app": "jellyfin",
"version": "1.0.0", // added at "done"
"error": "details..." // added at "error"
}
Lock: ``/run/furtka/install.lock`` (tmpfs, reboot-safe). Global, not
per-app two parallel installs are not a v1 use-case and the lock
keeps the state-file representation simple (one in-flight install at
a time).
"""
from __future__ import annotations
import fcntl
import json
import os
import time
from pathlib import Path
from furtka import dockerops
from furtka.manifest import load_manifest
from furtka.paths import apps_dir
_INSTALL_STATE = Path(os.environ.get("FURTKA_INSTALL_STATE", "/var/lib/furtka/install-state.json"))
_LOCK_PATH = Path(os.environ.get("FURTKA_INSTALL_LOCK", "/run/furtka/install.lock"))
class InstallRunnerError(RuntimeError):
    """Any failure in the background install flow that should surface to the caller."""
def state_path() -> Path:
    # Accessor (not the constant) so tests that patch the env var via
    # module reload, or monkeypatch this function, have one seam.
    return _INSTALL_STATE
def lock_path() -> Path:
    # Same seam as state_path(), for the flock file.
    return _LOCK_PATH
def write_state(stage: str, **extra) -> None:
    """Atomic JSON state write — same shape as catalog/update-state.

    Serializes {"stage", "updated_at", **extra} to a .tmp sibling and
    rename()s it over the real path, so pollers never observe a
    half-written file. `updated_at` is local time with a UTC offset.
    """
    state_path().parent.mkdir(parents=True, exist_ok=True)
    tmp = state_path().with_suffix(".tmp")
    payload = {"stage": stage, "updated_at": time.strftime("%Y-%m-%dT%H:%M:%S%z"), **extra}
    tmp.write_text(json.dumps(payload, indent=2))
    tmp.replace(state_path())
def read_state() -> dict:
    """Best-effort read of the install state.

    Returns {} when the file is missing or unparseable (e.g. caught
    mid-write by a non-atomic reader) — callers treat {} as "no
    install in flight".
    """
    try:
        return json.loads(state_path().read_text())
    except (FileNotFoundError, json.JSONDecodeError):
        return {}
def acquire_lock():
    """Take the global one-install-at-a-time flock, non-blocking.

    Returns the open file handle — the flock lives as long as the
    handle, so the caller must keep it open for the duration (see
    run_install's `with acquire_lock():`). Raises InstallRunnerError
    immediately when another process holds the lock.
    """
    path = lock_path()
    path.parent.mkdir(parents=True, exist_ok=True)
    fh = path.open("w")
    try:
        fcntl.flock(fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except BlockingIOError as e:
        fh.close()
        raise InstallRunnerError("another install is already in progress") from e
    return fh
def run_install(name: str) -> None:
    """Docker-facing phases of the install: pull → volumes → compose up.

    Called by the ``furtka app install-bg <name>`` CLI subcommand from
    the systemd-run unit spawned by the API. Assumes the API has already
    run ``installer.install_from()``, so the app folder, .env, and
    manifest are on disk at ``apps_dir() / <name>``.

    Every phase transition is written to the state file for the UI to
    poll. On exception the state flips to ``"error"`` with the message,
    then the exception is re-raised so the CLI exits non-zero and
    journald has a traceback.

    NOTE(review): load_manifest runs *before* the try block, so a
    corrupt manifest raises without writing an "error" state — the
    state file stays at whatever the API wrote ("pulling_image").
    Confirm whether that is intended.
    """
    with acquire_lock():
        target = apps_dir() / name
        manifest = load_manifest(target / "manifest.json", expected_name=name)
        try:
            write_state("pulling_image", app=name)
            dockerops.compose_pull(target, name)
            write_state("creating_volumes", app=name)
            for short in manifest.volumes:
                dockerops.ensure_volume(manifest.volume_name(short))
            write_state("starting_container", app=name)
            dockerops.compose_up(target, name)
            write_state("done", app=name, version=manifest.version)
        except Exception as e:
            write_state("error", app=name, error=str(e))
            raise

View file

@ -1,319 +0,0 @@
import shutil
from pathlib import Path
from furtka import sources
from furtka.manifest import Manifest, ManifestError, load_manifest
from furtka.paths import apps_dir
# Values that an app's .env.example may use as obvious "fill me in" markers.
# If any of these reach the live .env, install refuses — otherwise we'd ship
# an SMB share with password "changeme" out of the box, which is the kind of
# default that ends up screenshotted on Hacker News.
PLACEHOLDER_SECRETS: frozenset[str] = frozenset({"changeme"})
# System paths that must never be accepted as a user-supplied `path`-type
# setting. The user is root on their own box, so this is about preventing
# accidental footguns (typing `/etc` when they meant `/mnt/etc`), not
# defending against an attacker. Matches exact paths and their subtrees
# after `Path.resolve()` — so `/mnt/../etc` also lands here. Comparison is
# a string-prefix check on the resolved path (see _is_denied_system_path).
DENIED_PATH_PREFIXES: tuple[str, ...] = (
    "/etc",
    "/root",
    "/boot",
    "/proc",
    "/sys",
    "/dev",
    "/bin",
    "/sbin",
    "/usr/bin",
    "/usr/sbin",
    "/var/lib/furtka",
)
class InstallError(RuntimeError):
    """Any install/validation failure — surfaced to the CLI/API as a user-facing error."""
    pass
def _placeholder_keys(env_path: Path) -> list[str]:
    """Names of keys in *env_path* whose value is a known placeholder.

    A missing file means nothing to flag. Values are compared after
    stripping surrounding whitespace and one layer of single or double
    quotes; blank lines, comments, and lines without '=' are skipped.
    """
    if not env_path.exists():
        return []
    flagged: list[str] = []
    for raw in env_path.read_text().splitlines():
        line = raw.strip()
        if not line or line.startswith("#") or "=" not in line:
            continue
        key, _, rhs = line.partition("=")
        cleaned = rhs.strip().strip('"').strip("'")
        if cleaned in PLACEHOLDER_SECRETS:
            flagged.append(key.strip())
    return flagged
def _is_denied_system_path(resolved: str) -> bool:
    """True when *resolved* (a resolved absolute path as a string) is the
    filesystem root or equals / lives under any denied system prefix."""
    if resolved == "/":
        return True
    return any(
        resolved == prefix or resolved.startswith(prefix + "/")
        for prefix in DENIED_PATH_PREFIXES
    )
def _path_setting_errors(m: Manifest, env_path: Path) -> list[str]:
    """Validate the filesystem paths named by `path`-type settings.

    Returns one human-readable message per offending setting. Empty
    values on non-required settings are allowed — the required-field
    check in the caller already refuses blanks on required fields
    before write. A path must be absolute, resolvable, outside the
    denied system prefixes, and an existing directory.
    """
    if not env_path.exists():
        return []
    values = _read_env(env_path)
    errors: list[str] = []
    for s in m.settings:
        if s.type != "path":
            continue
        value = values.get(s.name, "")
        if not value:
            continue
        p = Path(value)
        if not p.is_absolute():
            errors.append(f"{s.name}={value!r} must be an absolute path (start with /)")
            continue
        try:
            # strict=False: resolve symlinks/.. without requiring existence;
            # existence is checked separately below for a clearer message.
            resolved = p.resolve(strict=False)
        except (OSError, RuntimeError) as e:
            errors.append(f"{s.name}={value!r} cannot be resolved: {e}")
            continue
        if _is_denied_system_path(str(resolved)):
            errors.append(f"{s.name}={value!r} resolves into a system path and is not allowed")
            continue
        if not resolved.exists():
            errors.append(f"{s.name}={value!r} does not exist on this box")
            continue
        if not resolved.is_dir():
            errors.append(f"{s.name}={value!r} is not a directory")
            continue
    return errors
def _format_env_value(v: str) -> str:
# Quote values that contain whitespace, quotes, or shell metacharacters so
# docker-compose's env substitution reads them back intact. Simple values
# stay unquoted to keep the file readable when a user SSHes in.
if v == "" or any(c in v for c in " \t\"'$`\\#"):
escaped = v.replace("\\", "\\\\").replace('"', '\\"')
return f'"{escaped}"'
return v
def write_env(env_path: Path, values: dict[str, str]) -> None:
"""Write a KEY=VALUE .env file atomically with 0600 perms.
Preserves insertion order of `values` so the file reads in the same order
the user filled in the form.
"""
lines = [f"{k}={_format_env_value(v)}" for k, v in values.items()]
body = "\n".join(lines) + ("\n" if lines else "")
tmp = env_path.with_suffix(env_path.suffix + ".tmp")
tmp.write_text(body)
tmp.chmod(0o600)
tmp.replace(env_path)
def resolve_source(source: str) -> Path:
    """Resolve a `furtka app install <source>` arg to a real source folder.

    If `source` is an existing directory, use it. A path-looking arg
    ("/" or leading ".") that is *not* a directory is rejected outright
    rather than falling through to name lookup. Otherwise treat it as
    an app name and look it up via `furtka.sources.resolve_app_name`,
    which checks the synced catalog first and falls back to the bundled
    seed.

    Raises InstallError when nothing matches.
    """
    p = Path(source)
    if p.is_dir():
        return p
    if "/" in source or source.startswith("."):
        raise InstallError(f"{source!r} is not a directory")
    resolved = sources.resolve_app_name(source)
    if resolved is None:
        raise InstallError(f"{source!r} not found as a path, catalog app, or bundled app")
    return resolved.path
def install_from(src: Path, settings: dict[str, str] | None = None) -> Path:
    """Copy a validated app folder into /var/lib/furtka/apps/<name>/.

    If `settings` is provided, the .env is written from those values
    (this is what the Web UI / API does — user fills in a form, values
    land here). Otherwise, preserves an existing .env on upgrade and
    bootstraps from .env.example on first install.

    Refuses to finish (raises InstallError) if the resulting .env still
    has placeholder secrets — the target folder is left in place so the
    user can edit and re-run. Also validates `path`-type settings.

    Returns the target folder on success.
    """
    manifest_path = src / "manifest.json"
    if not manifest_path.exists():
        raise InstallError(f"{src} has no manifest.json")
    try:
        m = load_manifest(manifest_path)
    except ManifestError as e:
        raise InstallError(str(e)) from e
    if settings is not None:
        # Reject unknown keys up-front, before anything is copied.
        declared = {s.name for s in m.settings}
        for key in settings:
            if key not in declared:
                raise InstallError(f"{m.name}: unknown setting {key!r}")
    target = apps_dir() / m.name
    target.mkdir(parents=True, exist_ok=True)
    # Flat copy: only top-level files are shipped (compose file, manifest,
    # env example, icon); subdirectories in the source are ignored.
    for item in src.iterdir():
        if not item.is_file():
            continue
        # Never overwrite an existing user .env — either settings-driven write
        # or previous manual edit has authority.
        if item.name == ".env" and (target / ".env").exists():
            continue
        shutil.copy2(item, target / item.name)
    env = target / ".env"
    env_example = target / ".env.example"
    if settings is not None:
        # Merge: start from existing .env (to preserve values the user didn't
        # change — e.g. when editing a single password), overlay the submitted
        # settings. Manifest-declared fields always appear in the final file.
        existing = _read_env(env) if env.exists() else {}
        merged: dict[str, str] = {}
        for s in m.settings:
            if s.name in settings:
                merged[s.name] = settings[s.name]
            elif s.name in existing:
                merged[s.name] = existing[s.name]
            elif s.default is not None:
                merged[s.name] = s.default
            else:
                merged[s.name] = ""
        # Preserve any non-manifest keys already in .env (forward-compat).
        for k, v in existing.items():
            if k not in merged:
                merged[k] = v
        # Required-field check runs on the merged view so that editing just
        # one field (e.g. password) doesn't trip on unsubmitted fields that
        # already have values in the existing .env.
        for s in m.settings:
            if s.required and not merged.get(s.name):
                raise InstallError(f"{m.name}: setting {s.name!r} is required")
        write_env(env, merged)
    elif not env.exists() and env_example.exists():
        # First install with no settings and no .env shipped: bootstrap from
        # .env.example so compose has values to substitute.
        shutil.copy2(env_example, env)
    # .env carries app secrets — lock to root-only. Done before the placeholder
    # check so even the half-installed state is at least not world-readable.
    if env.exists():
        env.chmod(0o600)
    bad = _placeholder_keys(env)
    if bad:
        raise InstallError(
            f"{m.name}: {env} still has placeholder values for {', '.join(bad)}. "
            f"Open the app in the Furtka UI to fill in real values, or edit the "
            f"file and re-run `furtka app install {m.name}`."
        )
    path_errors = _path_setting_errors(m, env)
    if path_errors:
        raise InstallError(f"{m.name}: {'; '.join(path_errors)}")
    return target
def _read_env(env_path: Path) -> dict[str, str]:
"""Parse a simple KEY=VALUE .env into a dict. Unquotes quoted values."""
out: dict[str, str] = {}
for raw in env_path.read_text().splitlines():
line = raw.strip()
if not line or line.startswith("#") or "=" not in line:
continue
key, _, value = line.partition("=")
value = value.strip()
if (value.startswith('"') and value.endswith('"')) or (
value.startswith("'") and value.endswith("'")
):
value = value[1:-1].replace('\\"', '"').replace("\\\\", "\\")
out[key.strip()] = value
return out
def read_env_values(env_path: Path) -> dict[str, str]:
    """Public wrapper around _read_env — returns {} if the file doesn't exist."""
    if not env_path.exists():
        return {}
    return _read_env(env_path)
def update_env(name: str, settings: dict[str, str]) -> Path:
    """Merge `settings` into the installed app's .env.

    Preserves values the user didn't submit. Validates required fields
    against the merged view. Leaves files/manifest untouched — for
    already-installed apps only. Returns the target folder; caller is
    expected to run reconcile to restart the containers.

    NOTE(review): the merge/validate sequence duplicates the
    settings-branch of install_from — candidate for a shared helper.
    """
    target = apps_dir() / name
    manifest_path = target / "manifest.json"
    if not manifest_path.exists():
        raise InstallError(f"{name!r} is not installed")
    try:
        m = load_manifest(manifest_path)
    except ManifestError as e:
        raise InstallError(str(e)) from e
    declared = {s.name for s in m.settings}
    for key in settings:
        if key not in declared:
            raise InstallError(f"{m.name}: unknown setting {key!r}")
    env = target / ".env"
    existing = _read_env(env) if env.exists() else {}
    # Merge order: submitted value > existing .env value > manifest default
    # > empty string. Same policy as install_from's settings branch.
    merged: dict[str, str] = {}
    for s in m.settings:
        if s.name in settings:
            merged[s.name] = settings[s.name]
        elif s.name in existing:
            merged[s.name] = existing[s.name]
        elif s.default is not None:
            merged[s.name] = s.default
        else:
            merged[s.name] = ""
    # Preserve any non-manifest keys already in .env (forward-compat).
    for k, v in existing.items():
        if k not in merged:
            merged[k] = v
    for s in m.settings:
        if s.required and not merged.get(s.name):
            raise InstallError(f"{m.name}: setting {s.name!r} is required")
    write_env(env, merged)
    bad = _placeholder_keys(env)
    if bad:
        raise InstallError(f"{m.name}: {env} still has placeholder values for {', '.join(bad)}.")
    path_errors = _path_setting_errors(m, env)
    if path_errors:
        raise InstallError(f"{m.name}: {'; '.join(path_errors)}")
    return target
def remove(name: str) -> Path:
    """Delete /var/lib/furtka/apps/<name>/. Volumes are NOT touched.

    Caller is responsible for stopping the compose project first.
    Raises InstallError when the app isn't installed; returns the
    (now-deleted) target path on success.
    """
    target = apps_dir() / name
    if not target.exists():
        raise InstallError(f"{name!r} is not installed")
    shutil.rmtree(target)
    return target

View file

@ -1,151 +0,0 @@
import json
import re
from dataclasses import dataclass, field
from pathlib import Path
# Keys every manifest.json must carry; checked up-front in load_manifest.
REQUIRED_FIELDS = (
    "name",
    "display_name",
    "version",
    "description",
    "volumes",
    "ports",
    "icon",
)
# Input widget types a setting may declare; "text" is the default.
VALID_SETTING_TYPES = frozenset({"text", "password", "number", "path"})
# Env-var style: UPPER_SNAKE_CASE, no leading digit.
SETTING_NAME_RE = re.compile(r"^[A-Z_][A-Z0-9_]*$")
class ManifestError(Exception):
    """Any manifest parse/validation failure — messages include the manifest path."""
    pass
@dataclass(frozen=True)
class Setting:
    """One user-facing configuration field declared by an app manifest."""
    name: str  # env-var name, e.g. SMB_PASSWORD
    label: str  # human label shown in the UI
    description: str  # one-sentence help text under the input
    type: str  # one of VALID_SETTING_TYPES: "text" | "password" | "number" | "path"
    required: bool  # blank value is rejected at install/update time
    default: str | None  # used when neither submitted nor already in .env
@dataclass(frozen=True)
class Manifest:
    """Validated, immutable view of one app's manifest.json."""
    name: str
    display_name: str
    version: str
    description: str
    volumes: tuple[str, ...]
    ports: tuple[int, ...]
    icon: str
    description_long: str = ""
    settings: tuple[Setting, ...] = field(default_factory=tuple)
    # Optional "Open" link for the landing page + installed-app row.
    # `{host}` is substituted with the current browser hostname at render
    # time so the URL follows whatever the user typed to reach Furtka —
    # furtka.local, a raw IP, a future reverse-proxy hostname. Apps with
    # no frontend (CLI-only, background workers) leave this empty.
    open_url: str = ""
    def volume_name(self, short: str) -> str:
        # Namespace volume names so two apps can each declare e.g. "data"
        # without colliding in `docker volume ls`.
        if short not in self.volumes:
            raise ManifestError(f"{self.name}: volume {short!r} not declared in manifest")
        return f"furtka_{self.name}_{short}"
def _parse_settings(raw: object, manifest_path: Path) -> tuple[Setting, ...]:
    """Validate the manifest's optional "settings" list into Setting objects.

    None (key absent) means no settings. Enforces: a list of objects,
    unique UPPER_SNAKE_CASE names, a known type (default "text").
    `label` falls back to the name, `required` to False, and a non-str
    default is stringified. Raises ManifestError (message includes
    *manifest_path*) on any violation.
    """
    if raw is None:
        return ()
    if not isinstance(raw, list):
        raise ManifestError(f"{manifest_path}: settings must be a list")
    out: list[Setting] = []
    seen: set[str] = set()
    for i, item in enumerate(raw):
        if not isinstance(item, dict):
            raise ManifestError(f"{manifest_path}: settings[{i}] must be an object")
        name = item.get("name")
        if not isinstance(name, str) or not SETTING_NAME_RE.match(name):
            raise ManifestError(
                f"{manifest_path}: settings[{i}].name must be an UPPER_SNAKE_CASE env-var name"
            )
        if name in seen:
            raise ManifestError(f"{manifest_path}: settings has duplicate name {name!r}")
        seen.add(name)
        label = item.get("label", name)
        description = item.get("description", "")
        type_ = item.get("type", "text")
        if type_ not in VALID_SETTING_TYPES:
            valid = sorted(VALID_SETTING_TYPES)
            raise ManifestError(f"{manifest_path}: settings[{name}].type must be one of {valid}")
        required = bool(item.get("required", False))
        default = item.get("default")
        if default is not None and not isinstance(default, str):
            default = str(default)
        out.append(
            Setting(
                name=name,
                label=str(label),
                description=str(description),
                type=type_,
                required=required,
                default=default,
            )
        )
    return tuple(out)
def load_manifest(path: Path, expected_name: str | None = None) -> Manifest:
    """Parse and validate a manifest.json.

    `expected_name` is used by the scanner (where the install location's
    folder name IS the source of truth and must match the manifest). For
    loading from arbitrary source folders during install, leave it None
    — the manifest's own `name` field decides the install target.

    Raises ManifestError (message includes *path*) on any violation.
    """
    try:
        raw = json.loads(path.read_text())
    except json.JSONDecodeError as e:
        raise ManifestError(f"{path}: invalid JSON: {e}") from e
    if not isinstance(raw, dict):
        raise ManifestError(f"{path}: top-level must be an object")
    missing = [f for f in REQUIRED_FIELDS if f not in raw]
    if missing:
        raise ManifestError(f"{path}: missing required fields: {', '.join(missing)}")
    name = raw["name"]
    if not isinstance(name, str) or not name:
        raise ManifestError(f"{path}: name must be a non-empty string")
    if expected_name is not None and name != expected_name:
        raise ManifestError(f"{path}: name {name!r} must equal {expected_name!r}")
    volumes = raw["volumes"]
    if not isinstance(volumes, list) or not all(isinstance(v, str) and v for v in volumes):
        raise ManifestError(f"{path}: volumes must be a list of non-empty strings")
    ports = raw["ports"]
    if not isinstance(ports, list) or not all(isinstance(p, int) for p in ports):
        raise ManifestError(f"{path}: ports must be a list of integers")
    settings = _parse_settings(raw.get("settings"), path)
    open_url_raw = raw.get("open_url", "")
    if not isinstance(open_url_raw, str):
        raise ManifestError(f"{path}: open_url must be a string if set")
    return Manifest(
        name=name,
        display_name=str(raw["display_name"]),
        version=str(raw["version"]),
        description=str(raw["description"]),
        volumes=tuple(volumes),
        ports=tuple(ports),
        icon=str(raw["icon"]),
        description_long=str(raw.get("description_long", "")),
        settings=settings,
        open_url=open_url_raw,
    )

View file

@ -1,95 +0,0 @@
"""Stdlib-only password hashing, compatible with werkzeug's hash format.
Why this exists: 26.11-alpha introduced auth via ``werkzeug.security``,
but the target system doesn't have ``werkzeug`` installed (Core runs as
system Python with only the stdlib pyproject.toml's ``flask>=3.0``
dep is never pip-installed on the box). Fresh installs from a 26.11 /
26.12 ISO crashed on import; upgrades from pre-auth versions were
double-broken by that plus a too-strict updater health check.
Fix: replace werkzeug with stdlib equivalents using the same hash
**format** so existing ``users.json`` files created by 26.11 / 26.12 on
the rare boxes that happened to have werkzeug installed (Medion, .196
after manual pacman) still verify.
Format: ``<method>$<salt>$<hex digest>``
- ``pbkdf2:<hash>:<iterations>`` what we generate by default here
- ``scrypt:<N>:<r>:<p>`` what werkzeug's default produces
Both are implemented via ``hashlib`` which has been stdlib since 3.6.
"""
from __future__ import annotations
import hashlib
import hmac
import secrets
_PBKDF2_HASH = "sha256"
_PBKDF2_ITERATIONS = 600_000
_SALT_LEN = 16
def hash_password(password: str) -> str:
    """Return a ``pbkdf2:sha256:<iter>$<salt>$<hex>`` hash of *password*.

    PBKDF2-SHA256 over UTF-8 with a random 16-char urlsafe salt. 600k
    iterations — same as werkzeug's default in the 3.x series, roughly
    OWASP 2023's recommendation. Raises TypeError for non-str input.
    """
    if not isinstance(password, str):
        raise TypeError("password must be str")
    salt = secrets.token_urlsafe(_SALT_LEN)[:_SALT_LEN]
    derived = hashlib.pbkdf2_hmac(
        _PBKDF2_HASH,
        password.encode("utf-8"),
        salt.encode("utf-8"),
        _PBKDF2_ITERATIONS,
    )
    method = f"pbkdf2:{_PBKDF2_HASH}:{_PBKDF2_ITERATIONS}"
    return f"{method}${salt}${derived.hex()}"
def verify_password(password: str, hashed: str) -> bool:
    """Constant-time verify of *password* against a stored *hashed* value.

    Accepts our own ``pbkdf2:<hash>:<iter>$salt$hex`` hashes and legacy
    werkzeug ``scrypt:N:r:p$salt$hex`` hashes, so users.json files
    written by 26.11 / 26.12 keep working after upgrade. Malformed,
    unknown-method, or non-str input yields False rather than raising.
    """
    if not (isinstance(password, str) and isinstance(hashed, str)):
        return False
    fields = hashed.split("$", 2)
    if len(fields) != 3:
        return False
    method, salt, expected = fields
    algo, *params = method.split(":")
    pw_bytes = password.encode("utf-8")
    salt_bytes = salt.encode("utf-8")
    try:
        if algo == "pbkdf2":
            if len(params) < 2:
                return False
            derived = hashlib.pbkdf2_hmac(
                params[0], pw_bytes, salt_bytes, int(params[1])
            )
        elif algo == "scrypt":
            if len(params) < 3:
                return False
            derived = hashlib.scrypt(
                pw_bytes,
                salt=salt_bytes,
                n=int(params[0]),
                r=int(params[1]),
                p=int(params[2]),
                # werkzeug writes scrypt hashes with dklen=64; without an
                # explicit maxmem, OpenSSL's default memory cap raises
                # ValueError for N >= 32768.
                dklen=64,
                maxmem=132 * 1024 * 1024,
            )
        else:
            return False
    except (ValueError, TypeError, OverflowError):
        # Bad inner-hash name, non-integer params, or out-of-range
        # scrypt parameters all mean "does not verify".
        return False
    return hmac.compare_digest(derived.hex(), expected)

View file

@ -1,46 +0,0 @@
import os
from pathlib import Path
# User-installed apps: one folder per app (manifest.json, compose file, .env).
DEFAULT_APPS_DIR = Path("/var/lib/furtka/apps")
# Bundled apps live alongside the Python package inside each versioned slot
# (/opt/furtka/versions/<ver>/apps/), reached via the /opt/furtka/current
# symlink. A flat /opt/furtka/apps path would break the Phase-2 self-update
# flow (symlink swap wouldn't move the bundled-app tree along with the code).
DEFAULT_BUNDLED_APPS_DIR = Path("/opt/furtka/current/apps")
# Catalog apps come from `furtka catalog sync` pulling the daniel/furtka-apps
# release tarball. Lives under /var/lib/furtka/ so it survives core self-
# updates — the resolver (furtka.sources) prefers it over the bundled seed.
DEFAULT_CATALOG_DIR = Path("/var/lib/furtka/catalog")
# Users / auth state. One JSON file keyed by role — today only "admin" exists.
# Lives under /var/lib/furtka/ so self-updates don't stomp it. Mode 0600 is
# enforced by furtka.auth.save_users (same atomic-write pattern as the app
# .env files).
DEFAULT_USERS_FILE = Path("/var/lib/furtka/users.json")
# Static-web asset dir served by the Python handler for / and
# /settings* so those pages pick up the auth-guard. Caddy also serves
# /style.css and other assets directly from here for the login page.
DEFAULT_STATIC_WWW = Path("/opt/furtka/current/assets/www")
def apps_dir() -> Path:
    """Installed-apps root; FURTKA_APPS_DIR env var overrides (tests)."""
    return Path(os.environ.get("FURTKA_APPS_DIR", DEFAULT_APPS_DIR))
def bundled_apps_dir() -> Path:
    """Bundled seed-apps root; FURTKA_BUNDLED_APPS_DIR overrides."""
    return Path(os.environ.get("FURTKA_BUNDLED_APPS_DIR", DEFAULT_BUNDLED_APPS_DIR))
def catalog_dir() -> Path:
    """Synced-catalog root; FURTKA_CATALOG_DIR overrides."""
    return Path(os.environ.get("FURTKA_CATALOG_DIR", DEFAULT_CATALOG_DIR))
def catalog_apps_dir() -> Path:
    """The apps/ subtree of the synced catalog."""
    return catalog_dir() / "apps"
def users_file() -> Path:
    """Auth users.json path; FURTKA_USERS_FILE overrides."""
    return Path(os.environ.get("FURTKA_USERS_FILE", DEFAULT_USERS_FILE))
def static_www_dir() -> Path:
    """Static web-asset dir; FURTKA_STATIC_WWW overrides."""
    return Path(os.environ.get("FURTKA_STATIC_WWW", DEFAULT_STATIC_WWW))

View file

@ -1,52 +0,0 @@
from dataclasses import dataclass
from pathlib import Path
from furtka import dockerops
from furtka.scanner import scan
@dataclass(frozen=True)
class Action:
    """One step taken (or recorded) during a reconcile sweep."""

    kind: str  # "ensure_volume" | "compose_up" | "skip" | "error"
    target: str
    detail: str = ""

    def describe(self) -> str:
        """One-line rendering: kind padded to 14 columns, then target,
        with the detail appended in parentheses when present."""
        base = f"{self.kind:14s} {self.target}"
        return f"{base} ({self.detail})" if self.detail else base
def reconcile(apps_root: Path, dry_run: bool = False) -> list[Action]:
    """Walk the apps tree and bring docker into the desired state.

    For each scannable app: ensure its namespaced volumes exist, then
    `compose up` the project. With dry_run=True the actions are recorded
    but no docker calls are made.

    Failures during one app's reconcile (Docker errors, missing binary,
    …) are captured as Action(kind='error', …) and do NOT abort the
    whole sweep — the other apps still get reconciled. Callers inspect
    the returned actions to decide overall success.
    """
    actions: list[Action] = []
    for result in scan(apps_root):
        if not result.ok:
            actions.append(Action("skip", result.path.name, result.error or ""))
            continue
        m = result.manifest
        try:
            for vol_short in m.volumes:
                full = m.volume_name(vol_short)
                actions.append(Action("ensure_volume", full))
                if not dry_run:
                    dockerops.ensure_volume(full)
            actions.append(Action("compose_up", m.name))
            if not dry_run:
                dockerops.compose_up(result.path, m.name)
        except (dockerops.DockerError, FileNotFoundError, OSError) as e:
            # Catch broad enough to cover: docker daemon down, docker binary
            # missing on the box, compose file unreadable. Narrow enough that
            # programmer errors (KeyError etc.) still surface.
            actions.append(Action("error", m.name, str(e)))
    return actions
def has_errors(actions: list[Action]) -> bool:
    """True when any action in the sweep was recorded as an error."""
    for action in actions:
        if action.kind == "error":
            return True
    return False

View file

@ -1,35 +0,0 @@
from dataclasses import dataclass
from pathlib import Path
from furtka.manifest import Manifest, ManifestError, load_manifest
@dataclass(frozen=True)
class ScanResult:
    """Outcome of inspecting one app folder under the apps root."""
    path: Path  # the app folder itself
    manifest: Manifest | None  # parsed manifest, or None on failure
    error: str | None  # human-readable reason when manifest is None
    @property
    def ok(self) -> bool:
        # A result is usable exactly when a manifest was parsed.
        return self.manifest is not None
def scan(apps_root: Path) -> list[ScanResult]:
    """Scan each directory under *apps_root* for a manifest.json.

    Returns one ScanResult per subdirectory, in sorted-name order. A
    missing manifest or a ManifestError becomes a failed result (error
    set, manifest None) instead of raising, so one broken app folder
    can't hide the rest. A missing *apps_root* yields [].
    """
    if not apps_root.exists():
        return []
    results: list[ScanResult] = []
    for entry in sorted(apps_root.iterdir()):
        if not entry.is_dir():
            continue
        manifest_path = entry / "manifest.json"
        if not manifest_path.exists():
            results.append(ScanResult(path=entry, manifest=None, error="manifest.json missing"))
            continue
        try:
            manifest = load_manifest(manifest_path, expected_name=entry.name)
        except ManifestError as exc:
            results.append(ScanResult(path=entry, manifest=None, error=str(exc)))
        else:
            results.append(ScanResult(path=entry, manifest=manifest, error=None))
    return results

View file

@ -1,75 +0,0 @@
"""Single lookup layer for "where does app <name> live right now?".
Three origins an app folder can come from:
- ``catalog`` the daily-synced ``/var/lib/furtka/catalog/apps/`` tree
that ``furtka.catalog.sync_catalog`` maintains.
- ``bundled`` the seed ``/opt/furtka/current/apps/`` tree shipped
inside the core release tarball. Used for first-boot before any
catalog sync has run, and as the fallback when the catalog is stale,
missing, or doesn't know about this app.
- ``local`` an explicit directory path passed to ``furtka app install
/path/to/src``; bypasses this module entirely.
Catalog wins on collision. The precedence is deliberate when the user
pressed "Sync apps catalog" they want what they synced, not whatever the
core tarball happened to carry.
"""
from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from furtka.paths import bundled_apps_dir, catalog_apps_dir
@dataclass(frozen=True)
class AppSource:
    """A resolved app folder plus where it came from."""
    path: Path  # folder containing manifest.json
    origin: str  # "catalog" | "bundled" | "local"
def resolve_app_name(name: str) -> AppSource | None:
    """Return the source folder for a bundled/catalog app name.

    Checks catalog first, then bundled seed. Presence is tested by
    ``manifest.json`` existing — an empty folder or a stray ``.env``
    won't register. Returns ``None`` if the name isn't known anywhere.
    """
    cat = catalog_apps_dir() / name
    if (cat / "manifest.json").is_file():
        return AppSource(cat, "catalog")
    bundled = bundled_apps_dir() / name
    if (bundled / "manifest.json").is_file():
        return AppSource(bundled, "bundled")
    return None
def list_available() -> list[AppSource]:
    """Union of catalog and bundled apps; catalog wins on name collision.

    Each entry is a folder containing a manifest.json. Ordering is
    alphabetical by folder name, which matches how the scanner sorts so
    the UI list stays stable across sync/reboot.
    """
    seen: dict[str, AppSource] = {}
    cat_root = catalog_apps_dir()
    if cat_root.is_dir():
        for entry in sorted(cat_root.iterdir()):
            if not entry.is_dir():
                continue
            if not (entry / "manifest.json").is_file():
                continue
            seen[entry.name] = AppSource(entry, "catalog")
    bundled_root = bundled_apps_dir()
    if bundled_root.is_dir():
        for entry in sorted(bundled_root.iterdir()):
            if not entry.is_dir():
                continue
            if entry.name in seen:
                continue
            if not (entry / "manifest.json").is_file():
                continue
            seen[entry.name] = AppSource(entry, "bundled")
    return [seen[name] for name in sorted(seen)]

View file

@ -1,458 +0,0 @@
"""Furtka self-update logic.
The runtime layout (see also ``webinstaller/app.py`` slice 1b):
/opt/furtka/
versions/
26.0-alpha/ first install extracted here
26.1-alpha/ next version, after one update
current -> versions/26.1-alpha
This module handles the transition between versions. Flow:
1. ``check_update()`` queries the Forgejo releases API for the latest tag.
2. ``prepare_update()`` downloads the tarball + sha256 sidecar, verifies it,
extracts into ``/opt/furtka/versions/<ver>/_staging`` and moves it to
``versions/<ver>/`` on successful extract.
3. ``apply_update()`` flips ``/opt/furtka/current``, reloads caddy, and
restarts furtka-reconcile + furtka-api. Then health-checks the API. On
failure it flips the symlink back.
The full pipeline is wrapped in ``run_update()`` for the CLI, which also
writes stage-by-stage progress to ``/var/lib/furtka/update-state.json`` so
the web UI can poll progress without touching the (restarting) API.
Paths can be overridden via the ``FURTKA_ROOT`` env var so tests can point
the updater at a tmpdir.
"""
from __future__ import annotations
import fcntl
import json
import os
import shutil
import subprocess
import time
import urllib.error
import urllib.request
from dataclasses import dataclass
from pathlib import Path
from furtka import _release_common as _rc
FORGEJO_HOST = os.environ.get("FURTKA_FORGEJO_HOST", "forgejo.sourcegate.online")
FORGEJO_REPO = os.environ.get("FURTKA_FORGEJO_REPO", "daniel/furtka")
_FURTKA_ROOT = Path(os.environ.get("FURTKA_ROOT", "/opt/furtka"))
_STATE_DIR = Path(os.environ.get("FURTKA_STATE_DIR", "/var/lib/furtka"))
_CADDYFILE_LIVE = Path(os.environ.get("FURTKA_CADDYFILE_PATH", "/etc/caddy/Caddyfile"))
_CADDY_SNIPPET_DIR = Path(
os.environ.get("FURTKA_CADDY_SNIPPET_DIR", str(_CADDYFILE_LIVE.parent / "furtka.d"))
)
_CADDY_HTTPS_SNIPPET_DIR = Path(
os.environ.get("FURTKA_CADDY_HTTPS_SNIPPET_DIR", str(_CADDYFILE_LIVE.parent / "furtka-https.d"))
)
_SYSTEMD_DIR = Path(os.environ.get("FURTKA_SYSTEMD_DIR", "/etc/systemd/system"))
_HOSTNAME_FILE = Path(os.environ.get("FURTKA_HOSTNAME_FILE", "/etc/hostname"))
_CADDYFILE_HOSTNAME_MARKER = "__FURTKA_HOSTNAME__"
class UpdateError(RuntimeError):
"""Any failure in the update flow that should surface to the caller."""
def furtka_root() -> Path:
    # Install root, normally /opt/furtka (FURTKA_ROOT overrides for tests).
    return _FURTKA_ROOT


def versions_dir() -> Path:
    # One subdirectory per extracted release: versions/<ver>/.
    return furtka_root() / "versions"


def current_symlink() -> Path:
    # Symlink flipped between version slots on update/rollback.
    return furtka_root() / "current"


def state_path() -> Path:
    # Stage-by-stage progress file; polled by the web UI during restarts.
    return _STATE_DIR / "update-state.json"


def lock_path() -> Path:
    # flock target guarding against concurrent updates (see acquire_lock).
    return Path(os.environ.get("FURTKA_LOCK_PATH", "/run/furtka/update.lock"))
@dataclass(frozen=True)
class UpdateCheck:
    """Result of check_update(): the version pair plus release asset URLs."""

    current: str  # version currently running (from <current>/VERSION)
    latest: str  # newest release tag on Forgejo
    update_available: bool  # True when latest parses strictly newer than current
    tarball_url: str | None  # release tarball asset URL, if published
    sha256_url: str | None  # sha256 sidecar asset URL, if published
def read_current_version() -> str:
    """Return the string in <current>/VERSION, or "dev" if it can't be read.

    Any OS-level read failure (missing file, dangling symlink, a file where
    a directory was expected, permissions) falls back to "dev", as does an
    empty VERSION file.
    """
    try:
        return (current_symlink() / "VERSION").read_text().strip() or "dev"
    except OSError:
        # FileNotFoundError and NotADirectoryError are both OSError
        # subclasses — a single except covers the original tuple.
        return "dev"
def _forgejo_api(path: str) -> dict | list:
    # Thin wrapper pinning host/repo and this module's error class onto the
    # shared release helper. *path* is the API suffix, e.g. "/releases?limit=1".
    return _rc.forgejo_api(FORGEJO_HOST, FORGEJO_REPO, path, error_cls=UpdateError)


# Shared release-tag parser, used to order versions (check_update, rollback).
_version_tuple = _rc.version_tuple
def check_update() -> UpdateCheck:
    """Return current + latest versions and whether an update is available.

    Forgejo's /releases/latest endpoint skips anything marked as a
    pre-release, so during the CalVer alpha/beta stage where every tag
    carries a suffix, that endpoint always 404s. Query the paginated
    /releases list instead and take the first entry — Forgejo returns
    them newest-first, including pre-releases.
    """
    current = read_current_version()
    releases = _forgejo_api("/releases?limit=1")
    if not isinstance(releases, list) or not releases:
        raise UpdateError("no releases published yet")
    release = releases[0]
    latest = str(release.get("tag_name") or "").strip()
    if not latest:
        raise UpdateError("latest release has empty tag_name")
    tarball_url = None
    sha256_url = None
    # Match assets by name. The sidecar ends ".tar.gz.sha256", which never
    # matches the first branch (it doesn't end with ".tar.gz" itself), so
    # the if/elif order is safe.
    for asset in release.get("assets") or []:
        name = asset.get("name") or ""
        url = asset.get("browser_download_url") or ""
        if name.endswith(".tar.gz") and "furtka-" in name:
            tarball_url = url
        elif name.endswith(".tar.gz.sha256"):
            sha256_url = url
    # Strict ">" comparison: never offer a sideways move or a downgrade.
    available = latest != current and _version_tuple(latest) > _version_tuple(current)
    return UpdateCheck(
        current=current,
        latest=latest,
        update_available=available,
        tarball_url=tarball_url,
        sha256_url=sha256_url,
    )
def _download(url: str, dest: Path) -> None:
    # Delegates to the shared downloader, mapping failures to UpdateError.
    _rc.download(url, dest, error_cls=UpdateError)


# Shared sha256 hasher, re-exported under this module's namespace.
_sha256_of = _rc.sha256_of


def verify_tarball(tarball: Path, expected_sha: str) -> None:
    # Raises UpdateError when the tarball's digest doesn't match expected_sha.
    _rc.verify_tarball(tarball, expected_sha, error_cls=UpdateError)


def _parse_sha256_sidecar(text: str) -> str:
    # Parses the hex digest out of the downloaded .sha256 sidecar text.
    return _rc.parse_sha256_sidecar(text, error_cls=UpdateError)


def _extract_tarball(tarball: Path, dest: Path) -> str:
    # Extracts into *dest* and returns the VERSION string found in the
    # tarball (compared against the expected version in apply_update).
    return _rc.extract_tarball(tarball, dest, error_cls=UpdateError)
def _current_hostname() -> str:
    """Read the box's hostname from /etc/hostname, falling back to 'furtka'.

    Used to substitute the __FURTKA_HOSTNAME__ marker in the shipped Caddyfile
    so Caddy's `tls internal` sees a real name to issue a leaf cert for.
    """
    try:
        name = _HOSTNAME_FILE.read_text().strip()
    except OSError:
        # FileNotFoundError and PermissionError are OSError subclasses, so
        # a single except replaces the original redundant three-way tuple.
        return "furtka"
    return name or "furtka"
def _maybe_migrate_preserve_https() -> None:
    """26.14 → 26.15 migration: if the box already had the force-HTTPS
    redirect snippet on disk, that means the user explicitly opted
    into HTTPS under the old regime. Under the new opt-in regime,
    HTTPS also requires a separate listener snippet — write it here so
    the user's HTTPS doesn't silently break when the Caddyfile refresh
    removes the default hostname block.
    """
    redirect_snippet = _CADDY_SNIPPET_DIR / "redirect.caddyfile"
    https_snippet = _CADDY_HTTPS_SNIPPET_DIR / "https.caddyfile"
    # No-op unless the redirect exists AND the listener doesn't — never
    # overwrite a listener snippet that may already be in place.
    if not redirect_snippet.is_file() or https_snippet.is_file():
        return
    hostname = _current_hostname()
    https_snippet.write_text(
        f"{hostname}.local, {hostname} {{\n\ttls internal\n\timport furtka_routes\n}}\n"
    )
def _refresh_caddyfile(source: Path) -> bool:
    """Copy the shipped Caddyfile to /etc/caddy/ iff it differs. Returns True
    if the file changed (so caddy needs more than a bare reload).

    Substitutes __FURTKA_HOSTNAME__ with the current hostname before comparing
    and writing — the same rendering the webinstaller applies at install time,
    so a self-update lands byte-identical content when nothing else changed.
    """
    if not source.is_file():
        return False
    # Snippet dirs for the /api/furtka/https/force toggle. Pre-HTTPS
    # installs don't have them; ensure both so the Caddyfile's glob
    # imports can't trip an older Caddy on missing paths during the
    # first reload. furtka-https.d is new in 26.15-alpha — older boxes
    # upgrading across this version line won't have it on disk yet.
    _CADDY_SNIPPET_DIR.mkdir(mode=0o755, parents=True, exist_ok=True)
    _CADDY_HTTPS_SNIPPET_DIR.mkdir(mode=0o755, parents=True, exist_ok=True)
    # Migration: pre-26.15 Caddyfile always served :443 via tls internal,
    # so a box that had the "force HTTPS" redirect toggle ON relied on
    # HTTPS being there implicitly. After this Caddyfile refresh the
    # hostname block is gone, so the redirect would 301 to a dead :443.
    # Preserve intent by writing the HTTPS listener snippet too.
    # (Must run after the mkdirs above — it writes into the new dir.)
    _maybe_migrate_preserve_https()
    rendered = source.read_text().replace(_CADDYFILE_HOSTNAME_MARKER, _current_hostname())
    if _CADDYFILE_LIVE.is_file() and rendered == _CADDYFILE_LIVE.read_text():
        return False
    _CADDYFILE_LIVE.parent.mkdir(parents=True, exist_ok=True)
    _CADDYFILE_LIVE.write_text(rendered)
    return True
def _link_new_units(unit_dir: Path) -> list[str]:
    """`systemctl link` unit files from *unit_dir* not yet in /etc/systemd.

    Returns the names of newly linked units. Freshly linked ``.timer``
    units are additionally ``systemctl enable``d: the installer's enable
    list only applies to fresh installs, so a timer introduced by a
    self-update (e.g. 26.5 → 26.6 adding furtka-catalog-sync.timer) would
    otherwise sit linked-but-disabled and never fire on upgraded boxes.
    """
    if not unit_dir.is_dir():
        return []
    newly_linked: list[str] = []
    for candidate in sorted(unit_dir.iterdir()):
        if candidate.suffix not in (".service", ".timer"):
            continue
        destination = _SYSTEMD_DIR / candidate.name
        # is_symlink() also catches dangling links that exists() misses.
        if destination.exists() or destination.is_symlink():
            continue
        _run(["systemctl", "link", str(candidate)])
        if candidate.suffix == ".timer":
            _run(["systemctl", "enable", candidate.name])
        newly_linked.append(candidate.name)
    return newly_linked
def write_state(stage: str, **extra) -> None:
    """Atomically persist the current update *stage* (plus extras) to disk.

    Written via a .tmp sibling + rename so the web UI never reads a
    half-written JSON file.
    """
    destination = state_path()
    destination.parent.mkdir(parents=True, exist_ok=True)
    payload = {"stage": stage, "updated_at": time.strftime("%Y-%m-%dT%H:%M:%S%z"), **extra}
    scratch = destination.with_suffix(".tmp")
    scratch.write_text(json.dumps(payload, indent=2))
    scratch.replace(destination)
def read_state() -> dict:
    """Return the last persisted update state, or {} if absent/corrupt."""
    try:
        raw = state_path().read_text()
    except FileNotFoundError:
        return {}
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        return {}
def acquire_lock():
    """Take the exclusive, non-blocking update lock.

    Returns the open lock-file handle. The flock lives as long as the
    handle does, so callers must keep it referenced for the duration of
    the update (``with acquire_lock():`` works — closing the file on exit
    releases the lock). Raises UpdateError immediately (LOCK_NB) if
    another process already holds it.
    """
    path = lock_path()
    path.parent.mkdir(parents=True, exist_ok=True)
    fh = path.open("w")
    try:
        fcntl.flock(fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except BlockingIOError as e:
        fh.close()
        raise UpdateError("another update is already in progress") from e
    return fh
def _run(cmd: list[str]) -> None:
    """Run *cmd*, raising UpdateError with its captured output on failure."""
    result = subprocess.run(cmd, capture_output=True, text=True, check=False)
    if result.returncode == 0:
        return
    detail = (result.stderr or result.stdout).strip()
    raise UpdateError(f"{' '.join(cmd)} exited {result.returncode}: {detail}")
def _health_check(url: str, deadline_s: float = 30.0) -> bool:
    """Poll *url* until we get *any* response from the Python server.

    Treats any 2xx-4xx response as "server is up". A 401 on
    /api/apps after the 26.11-alpha auth-guard shipped is a perfectly
    valid signal that the new code imported + the socket is listening —
    rejecting the request is still "alive". Only 5xx or connection-
    level failures count as unhealthy.

    Rationale: pre-26.13 this function hit /api/apps and expected 200,
    which silently broke every upgrade across the auth boundary (26.10
    → 26.11+) and auto-rolled back. Now we just need proof the new
    process came up.
    """
    end = time.time() + deadline_s
    while time.time() < end:
        try:
            with urllib.request.urlopen(url, timeout=3) as resp:
                # Any 2xx/3xx → alive. urllib follows redirects by
                # default, so a 302 → /login resolves to /login's 200.
                if resp.status < 500:
                    return True
        except urllib.error.HTTPError as e:
            # 4xx → server is up, just refused us (auth, bad request,
            # whatever). Counts as healthy for the "did it come back"
            # check. 5xx → genuinely broken, don't accept; fall through
            # and retry until the deadline.
            if 400 <= e.code < 500:
                return True
        except urllib.error.URLError:
            # Connection refused / DNS / timeout → not up yet, retry.
            # (Must come after HTTPError — HTTPError subclasses URLError.)
            pass
        time.sleep(1)
    return False
def prepare_update(check: UpdateCheck, download_dir: Path | None = None) -> tuple[Path, str]:
    """Fetch the release tarball + sha256 sidecar and verify the digest.

    Returns (tarball_path, version). Raises UpdateError when the release
    lacks either asset, a download fails, or the digest doesn't match.
    """
    if not (check.tarball_url and check.sha256_url):
        raise UpdateError("release is missing tarball or sha256 asset")
    target_dir = download_dir if download_dir else _STATE_DIR / "updates"
    target_dir.mkdir(parents=True, exist_ok=True)
    basename = f"furtka-{check.latest}.tar.gz"
    tarball_path = target_dir / basename
    sidecar_path = target_dir / f"{basename}.sha256"
    write_state("downloading", latest=check.latest)
    _download(check.tarball_url, tarball_path)
    _download(check.sha256_url, sidecar_path)
    write_state("verifying", latest=check.latest)
    digest = _parse_sha256_sidecar(sidecar_path.read_text())
    verify_tarball(tarball_path, digest)
    return tarball_path, check.latest
def apply_update(tarball: Path, version: str) -> None:
    """Extract, flip the symlink, restart services. Raises on failure.

    Caller is expected to have verified the sha256 already but we re-check
    here against the on-disk file anyway (TOCTOU).

    NOTE(review): no sha256 re-check is visible in this function itself —
    confirm ``_extract_tarball`` performs it, otherwise the sentence above
    is stale.
    """
    current = current_symlink()
    versions = versions_dir()
    versions.mkdir(parents=True, exist_ok=True)
    write_state("extracting", latest=version)
    # Extract into a "_staging-*" name first so a half-written tree never
    # sits at versions/<ver>/ (rollback() also skips "_"-prefixed slots).
    staging = versions / f"_staging-{version}"
    if staging.exists():
        shutil.rmtree(staging)
    actual_version = _extract_tarball(tarball, staging)
    if actual_version != version:
        shutil.rmtree(staging, ignore_errors=True)
        raise UpdateError(f"tarball VERSION ({actual_version}) doesn't match expected ({version})")
    target = versions / version
    if target.exists():
        shutil.rmtree(target)
    staging.rename(target)
    # mktemp-style 700 default on the staging dir carries through the
    # rename; Caddy (non-root) needs 755 to traverse /opt/furtka/current/.
    target.chmod(0o755)
    write_state("swapping", latest=version)
    # Capture the old symlink target so every failure path below can
    # restore it; None means first install (nothing to roll back to).
    previous = None
    if current.is_symlink():
        previous = os.readlink(current)
        current.unlink()
    try:
        current.symlink_to(target)
    except OSError as e:
        if previous:
            current.symlink_to(previous)
        raise UpdateError(f"symlink swap failed: {e}") from e
    write_state("restarting", latest=version)
    try:
        # Copy new Caddyfile into /etc/caddy/ if the release changed routes.
        # reload always runs afterwards to flush the file-handle cache so the
        # symlink flip takes effect even when Caddyfile itself didn't change.
        _refresh_caddyfile(target / "assets" / "Caddyfile")
        _run(["systemctl", "reload", "caddy"])
        # Pick up any new systemd unit files added by this release. Existing
        # linked units don't need relinking — daemon-reload rereads them.
        _link_new_units(target / "assets" / "systemd")
        _run(["systemctl", "daemon-reload"])
        _run(["systemctl", "try-restart", "furtka-reconcile.service"])
        _run(["systemctl", "restart", "furtka-api.service"])
    except UpdateError as e:
        _rollback(previous, version, f"service restart failed: {e}")
        raise
    write_state("verifying", latest=version)
    # Any live HTTP response (even 401/404) counts as healthy — see
    # _health_check. 30 s covers the API service's restart window.
    ok = _health_check("http://127.0.0.1:7000/api/apps", deadline_s=30.0)
    if not ok:
        _rollback(previous, version, "health check failed after restart")
        raise UpdateError("new version failed health check — rolled back")
    write_state("done", version=version)
def _rollback(previous_target: str | None, failed_version: str, reason: str) -> None:
    """Point `current` back at *previous_target* after a failed update.

    *previous_target* is the raw readlink value captured before the swap;
    None means this was a first install with nothing to restore — then we
    only record the failure. Always writes a "rolled_back" state entry
    carrying *reason* so the UI can show why.
    """
    current = current_symlink()
    if previous_target:
        if current.is_symlink():
            current.unlink()
        current.symlink_to(previous_target)
        # Best-effort restart on the previous target — if it fails too the
        # box is in a hard state, but we can only surface the reason.
        subprocess.run(["systemctl", "restart", "furtka-api.service"], check=False)
    write_state(
        "rolled_back",
        failed_version=failed_version,
        restored_to=previous_target or "(none)",
        reason=reason,
    )
def run_update() -> UpdateCheck:
    """End-to-end user-initiated update, held under the exclusive lock.

    Returns the UpdateCheck so callers can see what happened. Re-raises
    UpdateError on any failure; the state file records the stage reached.
    """
    with acquire_lock():
        outcome = check_update()
        if outcome.update_available:
            tarball, version = prepare_update(outcome)
            apply_update(tarball, version)
        else:
            write_state("done", version=outcome.current, note="already up to date")
        return outcome
def rollback() -> str:
    """Roll back to the most recent non-current version slot.

    Returns the version name we rolled back to. Raises UpdateError when
    `current` is not a symlink, no other slot exists, or the symlink swap
    fails (in which case the old link is restored first — same recovery
    apply_update performs, so the box is never left without a `current`).
    """
    current = current_symlink()
    if not current.is_symlink():
        raise UpdateError("/opt/furtka/current is not a symlink — can't roll back")
    previous = os.readlink(current)
    current_target = Path(previous).name
    # Version slots newest-first; "_"-prefixed staging leftovers excluded.
    slots = sorted(
        (p.name for p in versions_dir().iterdir() if p.is_dir() and not p.name.startswith("_")),
        key=_version_tuple,
        reverse=True,
    )
    candidates = [s for s in slots if s != current_target]
    if not candidates:
        raise UpdateError("no other version slots available to roll back to")
    target_name = candidates[0]
    target = versions_dir() / target_name
    current.unlink()
    try:
        current.symlink_to(target)
    except OSError as e:
        # Don't leave the box with no `current` at all — restore the old
        # link before surfacing the error (mirrors apply_update's swap).
        current.symlink_to(previous)
        raise UpdateError(f"rollback symlink swap failed: {e}") from e
    subprocess.run(["systemctl", "daemon-reload"], check=False)
    subprocess.run(["systemctl", "restart", "furtka-api.service"], check=False)
    write_state("rolled_back_manual", restored_to=target_name)
    return target_name

View file

@ -1,65 +0,0 @@
# Live ISO build
Builds a bootable Arch-based live ISO that auto-starts the Flask webinstaller from `../webinstaller/` on boot. User plugs in a USB, boots, and the installer wizard comes up on `http://proksi.local:5000` (or the raw IP shown on the console).
Runnable locally (below) or through Forgejo Actions — `.forgejo/workflows/build-iso.yml` builds on every push to `main` and on manual `workflow_dispatch`. The ISO lands as an artifact named `furtka-iso`, retained for 14 days. Feature branches don't trigger the ISO build; see `memory/project_ci_branching` for why.
## Run a build locally
Needs a host with Docker. Disk space required: ~15 GB scratch during the build, ~1.5 GB for the final ISO.
```bash
./iso/build.sh
```
Output ISO ends up in `iso/out/furtka-<date>-x86_64.iso`. Around 3–10 min on a 4-core VM. First run is slower because it pulls `archlinux:latest` and all packages from upstream.
The script re-execs itself inside a privileged `archlinux:latest` container. That's so `mkarchiso` has root + loop-mount access without polluting the host — Ubuntu hosts don't ship archiso natively anyway.
## What gets baked in
The build starts from Arch's stock `releng` profile (the same one used to build the official Arch ISO), then overlays our customizations from `overlay/`:
| Overlay file | Effect |
|----------------------------------------------|----------------------------------------------------------------------------------|
| `overlay/packages.extra` | Appended to the package list. Adds `python`, `python-flask`, `avahi`, `nss-mdns` |
| `overlay/profiledef.sh` | Appended to `profiledef.sh`. Renames the ISO to `furtka-*` with a dated version |
| `overlay/airootfs/opt/furtka/` | Directory where `webinstaller/` is copied at build time |
| `overlay/airootfs/etc/hostname` | Live-ISO hostname (`proksi`) so mDNS advertises the installer as `proksi.local` |
| `overlay/airootfs/etc/issue` | Welcome banner on the TTY pointing users at `http://proksi.local:5000` |
| `overlay/airootfs/usr/local/bin/furtka-update-issue` | Rewrites `/etc/issue` at runtime so the banner also shows the DHCP-assigned IP as a fallback URL |
| `overlay/airootfs/etc/systemd/system/` | `furtka-webinstaller.service` (Flask on :5000) + `furtka-issue.service` (runs the banner-updater on network-online), each symlinked into `multi-user.target.wants/` to auto-start on boot |
The systemd service runs `flask --app app run --host 0.0.0.0 --port 5000` under `/opt/furtka`. The `0.0.0.0` binding is important — the Flask default is localhost-only, which wouldn't be reachable from another machine on the LAN.
mDNS is wired: `avahi-daemon` + `nss-mdns` come from `packages.extra`, the live ISO's hostname is `proksi`, and as soon as `systemd-networkd-wait-online` fires the installer is reachable at `http://proksi.local:5000`. The raw IP still shows on the console for fallback — some Windows clients need the Bonjour service for `.local` to resolve at all.
## Test flow
1. Build: `./iso/build.sh`
2. Copy the ISO to your Proxmox host's ISO storage (typically `/var/lib/vz/template/iso/`). Browser uploads of 1.5 GB truncate silently — prefer `scp` over the Proxmox WebUI.
3. Create a VM with:
- 2 vCPU, 4 GB RAM, 20 GB disk (empty)
- **BIOS: OVMF (UEFI)**, add EFI Disk on `local-lvm`. SeaBIOS fails to load `ldlinux.c32` from our ISO; only the UEFI path works reliably.
- **Secure Boot disabled**. Our GRUB isn't signed, so Secure Boot rejects it with `Access Denied`. Either boot into OVMF setup (Esc during boot) → Device Manager → Secure Boot Configuration → Attempt Secure Boot [ ] → F10 → reboot. Or remove the EFI Disk and re-add it with "Pre-Enroll keys" unchecked.
- CD-ROM attached with the Furtka ISO
- Boot order: CD before disk
- Network: same bridge as your LAN, DHCP
4. Start the VM. Wait ~30 s for boot.
5. Find its IP in Proxmox's VM summary (or your router's DHCP table)
6. Open `http://<vm-ip>:5000` — the existing 3-screen wizard should be there
## What you see after install + reboot
Once `archinstall` finishes and you click **Reboot now**, the VM comes up into the installed system. No more port `:5000` — the wizard ISO is gone. Instead:
- **Console**: agetty shows `Furtka is ready. Open http://<hostname>.local …` with the IP fallback underneath.
- **Browser** at `http://<hostname>.local` (default `http://furtka.local` — the form's default hostname is `furtka`; only the live-installer ISO uses `proksi`): Caddy-served landing page with three live status tiles (uptime, Docker version, free disk) refreshed every 30 s by `furtka-status.timer`. HTTPS is opt-in (26.15-alpha) — flip the toggle in `/settings` to switch on Caddy's `tls internal` on `:443`, then trust `rootCA.crt` from `/settings` to clear browser warnings.
- **SSH**: `ssh <user>@<hostname>.local` works; `docker ps` works without `sudo` because the user is in the `docker` group.
This is a demo shell — no Authentik, no app store yet. The landing page lives at `/srv/furtka/www/`, served by Caddy on `:80` per `/etc/caddy/Caddyfile`. All of this is written into the target by `webinstaller/app.py`'s `_post_install_commands` via archinstall's `custom_commands`.
## Known rough edges
- **Disk space**: the first time you build on a fresh host, the squashfs/xorriso steps need ~15 GB free. If the host's LVM-root is smaller, `xorriso` silently dies at the very end with "Image size exceeds free space on media".
- **Live-installer wizard is still HTTP-only**. `http://proksi.local:5000` during install has no TLS; once the box reboots, Caddy can serve `tls internal` on `:443` if the user opts in via `/settings` (26.15-alpha), but bringing TLS to the wizard itself is a later milestone.

View file

@ -1,119 +0,0 @@
#!/usr/bin/env bash
# Build a Furtka live ISO.
#
# From the repo root or from iso/ on any host with Docker:
#   ./iso/build.sh
#
# The build runs inside a privileged `archlinux:latest` container because
# mkarchiso needs root + loop mounts + an Arch package manager, which
# Ubuntu doesn't provide natively. Output ISO goes to iso/out/.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
OUT_DIR="$SCRIPT_DIR/out"

# Host stage: re-exec this same script inside the build container.
# FURTKA_ISO_INNER=1 marks the inner run so we don't recurse; `exec`
# replaces the host process. The repo is bind-mounted at /work, so the
# inner run recomputes SCRIPT_DIR/REPO_ROOT as container paths.
if [[ "${FURTKA_ISO_INNER:-0}" != "1" ]]; then
  mkdir -p "$OUT_DIR"
  echo "==> Launching build container"
  exec docker run --rm --privileged \
    -v "$REPO_ROOT:/work" \
    -w /work \
    -e FURTKA_ISO_INNER=1 \
    archlinux:latest \
    bash /work/iso/build.sh
fi
# ---- inside the container from here on ----
echo "==> Syncing pacman, installing archiso"
pacman -Syu --noconfirm --needed archiso

PROFILE_SRC="/usr/share/archiso/configs/releng"
PROFILE_WORK="/tmp/furtka-profile"
BUILD_WORK="/tmp/furtka-build"
OUT_IN_CONTAINER="/work/iso/out"

# Work on a throwaway copy of the stock releng profile; never mutate the
# packaged original.
rm -rf "$PROFILE_WORK" "$BUILD_WORK"
cp -a "$PROFILE_SRC" "$PROFILE_WORK"

echo "==> Overlaying Furtka customizations"
# Appended, not replaced: extra packages and profiledef overrides stack
# on top of releng's defaults (later profiledef assignments win).
cat "$SCRIPT_DIR/overlay/packages.extra" >> "$PROFILE_WORK/packages.x86_64"
cat "$SCRIPT_DIR/overlay/profiledef.sh" >> "$PROFILE_WORK/profiledef.sh"
cp -a "$SCRIPT_DIR/overlay/airootfs/." "$PROFILE_WORK/airootfs/"

echo "==> Rebranding boot menu (GRUB + syslinux + systemd-boot)"
# releng ships menu entries labelled "Arch Linux install medium" across three
# bootloader configs (BIOS syslinux, GRUB, systemd-boot for UEFI). Rewrite to
# our brand. Done with sed (not a static overlay) so upstream archiso file
# moves don't silently leave stale Arch labels behind.
#
# Also rebrands the syslinux menu header ("MENU TITLE Arch Linux") and the
# per-entry HELP text shown at the bottom of the BIOS screen. GRUB/efiboot
# don't ship equivalent long descriptions, so menu-entry rename is enough there.
find "$PROFILE_WORK/grub" "$PROFILE_WORK/syslinux" "$PROFILE_WORK/efiboot" \
  -type f \( -name "*.cfg" -o -name "*.conf" \) -print0 \
  | xargs -0 sed -i \
    -e 's/Arch Linux install medium/Furtka Live Installer/g' \
    -e 's/Arch Linux live medium/Furtka Live Installer/g' \
    -e 's/install Arch Linux or perform system maintenance/install Furtka or perform system maintenance/g' \
    -e 's/^MENU TITLE Arch Linux$/MENU TITLE Furtka/'
# Mark the default entry as (Recommended) so first-time users know which to
# pick. Targets the main entry only — speech/accessibility variants stay
# unlabeled to avoid suggesting they're the normal choice.
# (These patterns match the already-rebranded names, so they must run
# after the bulk sed above.)
sed -i 's/^title Furtka Live Installer (%ARCH%, UEFI)$/title (Recommended) Furtka Live Installer (%ARCH%, UEFI)/' \
  "$PROFILE_WORK/efiboot/loader/entries/01-archiso-linux.conf"
sed -i 's/^MENU LABEL Furtka Live Installer (%ARCH%, BIOS)$/MENU LABEL (Recommended) Furtka Live Installer (%ARCH%, BIOS)/' \
  "$PROFILE_WORK/syslinux/archiso_sys-linux.cfg"
sed -i "/--id 'archlinux'/s/menuentry \"Furtka Live Installer/menuentry \"(Recommended) Furtka Live Installer/" \
  "$PROFILE_WORK/grub/grub.cfg" "$PROFILE_WORK/grub/loopback.cfg"
mkdir -p "$PROFILE_WORK/airootfs/opt/furtka"
cp -a "$REPO_ROOT/webinstaller/." "$PROFILE_WORK/airootfs/opt/furtka/"
# Ship the post-install asset tree (HTML, CSS, systemd units, scripts, …)
# next to webinstaller/app.py so _resolve_assets_dir() finds it at runtime.
cp -a "$REPO_ROOT/assets" "$PROFILE_WORK/airootfs/opt/furtka/assets"
rm -rf "$PROFILE_WORK/airootfs/opt/furtka/__pycache__"
# VERSION next to the webinstaller so the wizard footer can render the
# release string at runtime instead of carrying a hardcoded one. Matches
# what the resource-manager payload ships in its own VERSION file below.
ISO_VERSION=$(grep -E '^version = ' "$REPO_ROOT/pyproject.toml" | head -1 | sed 's/.*= "\(.*\)"/\1/')
echo "$ISO_VERSION" > "$PROFILE_WORK/airootfs/opt/furtka/VERSION"

# Pack the resource manager (furtka/ Python package + bundled apps/) as a
# tarball that webinstaller hands to archinstall via custom_commands. Lives at
# a fixed path in the live ISO; the installed system reads it back, untars
# into /opt/furtka/versions/<VERSION>/, and gets a working `furtka` CLI + the
# fileshare app. Same tarball shape as Phase-2 self-update releases, so an
# ISO-installed box and an updated box converge on the same layout.
echo "==> Bundling resource manager payload"
PAYLOAD_STAGE="$(mktemp -d)"
cp -a "$REPO_ROOT/furtka" "$PAYLOAD_STAGE/"
cp -a "$REPO_ROOT/apps" "$PAYLOAD_STAGE/"
# assets/ ships at the tarball root (not inside the Python package) because
# Caddy, systemd, and the updater all expect it at /opt/furtka/current/assets/.
cp -a "$REPO_ROOT/assets" "$PAYLOAD_STAGE/"
find "$PAYLOAD_STAGE" -type d -name __pycache__ -exec rm -rf {} +
# VERSION at tarball root: the installer reads it to choose the versions/<ver>/
# directory name and /opt/furtka/current/VERSION reports it at runtime. Same
# value we wrote into /opt/furtka/VERSION for the live wizard footer above.
echo "$ISO_VERSION" > "$PAYLOAD_STAGE/VERSION"
tar -czf "$PROFILE_WORK/airootfs/opt/furtka-resource-manager.tar.gz" \
  -C "$PAYLOAD_STAGE" .
rm -rf "$PAYLOAD_STAGE"

# NOTE(review): the drop-in dir created here is never populated in this
# script — confirm whether an overlay file lands in it or the mkdir is a
# leftover. The multi-user.target.wants/ dir that `ln` targets must come
# from the overlay copy above (the furtka services link there) — confirm.
mkdir -p "$PROFILE_WORK/airootfs/etc/systemd/system/avahi-daemon.service.d"
ln -sf /usr/lib/systemd/system/avahi-daemon.service \
  "$PROFILE_WORK/airootfs/etc/systemd/system/multi-user.target.wants/avahi-daemon.service"

echo "==> Building ISO (mkarchiso)"
mkdir -p "$OUT_IN_CONTAINER"
mkarchiso -v -w "$BUILD_WORK" -o "$OUT_IN_CONTAINER" "$PROFILE_WORK"

echo
echo "==> Done. ISO(s) in $OUT_IN_CONTAINER (on host: iso/out/):"
ls -lh "$OUT_IN_CONTAINER"
View file

@ -1 +0,0 @@
proksi

View file

@ -1,6 +0,0 @@
Furtka Live Installer starting…
Once ready, open http://proksi.local:5000 on another device
on your network. The exact URL will appear below.

View file

@ -1,12 +0,0 @@
[Unit]
Description=Write Furtka /etc/issue with current IP for the console welcome
# Wait for the network so a DHCP address exists to render into the banner.
After=network-online.target
Wants=network-online.target

[Service]
Type=oneshot
ExecStart=/usr/local/bin/furtka-update-issue
# Keep the unit reported "active" after the one-shot exits.
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target

View file

@ -1,22 +0,0 @@
[Unit]
Description=Furtka Live Installer (Flask)
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
WorkingDirectory=/opt/furtka
# Python 3.14's new pathlib.Path.copy() refuses source==target paths, and
# archinstall's sync_log_to_install_medium() trips on that at __exit__
# because the chroot bindmount is already gone by the time it copies
# /var/log/archinstall/install.log. The install itself has already
# succeeded; this just means the log-sync step throws a misleading
# traceback. Neutralise the call on the live ISO (never runs on the
# target) so users don't see it. Idempotent — second run is a no-op.
ExecStartPre=/bin/sh -c 'sed -i "s|absolute_logfile\\.copy(logfile_target, preserve_metadata=True)|None # furtka patch: py3.14 Path.copy same-path workaround|" /usr/lib/python*/site-packages/archinstall/lib/installer.py || true'
# --host 0.0.0.0: Flask defaults to localhost-only, which would be
# unreachable from other devices on the LAN.
ExecStart=/usr/bin/python -m flask --app app run --host 0.0.0.0 --port 5000
Restart=on-failure
RestartSec=3

[Install]
WantedBy=multi-user.target

View file

@ -1,23 +0,0 @@
#!/bin/bash
# Regenerates /etc/issue so the live-ISO console tells the user which URL
# to open in their browser. Shows proksi.local (via avahi/mDNS) as the
# preferred URL and the raw IP as a fallback for networks where mDNS
# doesn't work. Reload at the end nudges agetty to redraw.
set -e

# First global-scope IPv4 address; field $4 is "addr/prefix", the cut
# strips the prefix length. Empty if no address is up yet.
ip=$(ip -4 -o addr show scope global 2>/dev/null | awk '{print $4}' | cut -d/ -f1 | head -1)
{
  echo
  echo "  Open Furtka in a browser on another device on your network:"
  echo
  echo "      http://proksi.local:5000   (easy — try this first)"
  if [ -n "$ip" ]; then
    echo "      http://${ip}:5000   (fallback if the first doesn't work)"
  fi
  echo
  echo "  Then follow the wizard to install Furtka on this machine."
  echo
} > /etc/issue
# Best-effort redraw; ignore failure (e.g. no agetty running yet).
agetty --reload 2>/dev/null || true

View file

@ -1,4 +0,0 @@
python
python-flask
avahi
nss-mdns

View file

@ -1,9 +0,0 @@
#!/usr/bin/env bash
# Overrides for releng's profiledef.sh — only the fields we want to change.
# build.sh appends this file to releng's original profiledef.sh, so when
# mkarchiso sources the combined file these later assignments win.
iso_name="furtka"
iso_label="FURTKA_$(date +%Y%m)"
iso_publisher="Furtka <https://furtka.org>"
iso_application="Furtka Live Installer"
iso_version="$(date +%Y.%m.%d)"

View file

@ -1,46 +0,0 @@
#!/usr/bin/env bash
# Install Docker Engine + Compose plugin on a fresh Ubuntu 24.04 VM
# and prepare it to host a Forgejo Actions runner.
#
# Run as the target user (needs sudo). Idempotent.
set -euo pipefail

# Guard: the apt repo / keyring paths below are Ubuntu-specific.
if [[ "$(. /etc/os-release && echo "$ID")" != "ubuntu" ]]; then
  echo "This script targets Ubuntu. Aborting." >&2
  exit 1
fi

echo "==> Updating apt and installing prerequisites"
sudo apt-get update -y
# arp-scan + iputils: needed by scripts/smoke-vm.sh for MAC→IP discovery
# of the test VM on the Proxmox test host (live ISO has no guest agent,
# so we scan the LAN and match on the MAC we assigned at VM creation).
sudo apt-get install -y ca-certificates curl gnupg arp-scan iputils-arping

echo "==> Adding Docker's official GPG key"
sudo install -m 0755 -d /etc/apt/keyrings
# Skip the download if the key is already in place (idempotency).
if [[ ! -f /etc/apt/keyrings/docker.asc ]]; then
  sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
  sudo chmod a+r /etc/apt/keyrings/docker.asc
fi

echo "==> Adding Docker apt repository"
ARCH="$(dpkg --print-architecture)"
CODENAME="$(. /etc/os-release && echo "$VERSION_CODENAME")"
echo "deb [arch=${ARCH} signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu ${CODENAME} stable" \
  | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

echo "==> Installing Docker Engine + Compose plugin"
sudo apt-get update -y
sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin

echo "==> Adding $USER to docker group"
sudo usermod -aG docker "$USER"

echo "==> Enabling docker service"
sudo systemctl enable --now docker

echo
echo "Done. Log out and back in (or run 'newgrp docker') so group membership takes effect."
docker --version
docker compose version
View file

@ -1,31 +0,0 @@
# Forgejo Actions runner, docker-outside-of-docker: the runner and its job
# containers all talk to the host daemon via the bind-mounted socket.
services:
  runner:
    image: code.forgejo.org/forgejo/runner:6
    container_name: forgejo-runner
    restart: unless-stopped
    # Running as root so (1) apk can install nodejs + docker-cli at
    # startup (needed by host-mode jobs that execute JS actions and by
    # `iso/build.sh` which shells out to `docker run`), and (2) access
    # to the host docker socket doesn't require group juggling.
    user: "0:0"
    environment:
      - DOCKER_HOST=unix:///var/run/docker.sock
      - CONFIG_FILE=/data/config.yml
    # Mount at /data so the container's data path matches the host path
    # /data (which is a symlink to this directory — see runner-setup.md).
    # When a host-mode job does `docker run -v /data/.cache/act/…:/work`,
    # host docker resolves the source via the symlink instead of failing
    # with "no such file or directory".
    volumes:
      - ./data:/data
      - /var/run/docker.sock:/var/run/docker.sock
      # Auto-deploy of furtka.org runs inside this container — the
      # runner host *is* the web server. Bind these at matching paths
      # so rsync/hugo just see plain local filesystem. Without these
      # mounts, .forgejo/workflows/deploy-site.yml can't reach the
      # source tree or the webroot.
      - /srv/furtka-site:/srv/furtka-site
      - /var/www/furtka.org:/var/www/furtka.org
    # Folded scalar: the apk install + daemon start run as one shell line.
    command: >-
      /bin/sh -c "apk add --no-cache nodejs docker-cli && sleep 5 &&
      forgejo-runner daemon --config /data/config.yml"

View file

@ -1,45 +0,0 @@
log:
  # Runner-side logging is verbose; job-facing log level stays at info.
  level: debug
  job_level: info
runner:
  # Registration state file written by `forgejo-runner register`.
  file: .runner
  # One job at a time — the ISO build + smoke test needs the whole host.
  capacity: 1
  timeout: 3h
  insecure: false
  fetch_timeout: 5s
  fetch_interval: 2s
  report_interval: 1s
  # Label mappings decide how each `runs-on:` value is executed. The
  # `:host` suffix means "run steps directly in the runner container"
  # (no wrapping job container). build-iso uses `runs-on: self-hosted`
  # because its `docker run -v $REPO_ROOT:/work` needs host-visible
  # paths — nested containers would put the workspace in a namespace
  # host docker can't see.
  labels:
    - "ubuntu-latest:docker://catthehacker/ubuntu:act-latest"
    - "docker:docker://catthehacker/ubuntu:act-latest"
    - "self-hosted:host"
cache:
  # Empty strings / zeroes mean "use the runner's built-in defaults".
  enabled: true
  dir: ""
  host: ""
  port: 0
  proxy_port: 0
container:
  network: ""
  privileged: false
  # Docker-outside-of-docker: runner and all job containers share the
  # host's docker daemon via the unix socket. valid_volumes whitelists
  # the socket so it can be mounted into job containers (the runner
  # handles this automatically — don't also mount it from a workflow
  # or you'll get "duplicate mount point").
  valid_volumes:
    - "/var/run/docker.sock"
  docker_host: "unix:///var/run/docker.sock"
  force_pull: false
host:
  # Left empty → runner picks its default parent dir for host-mode
  # job working directories.
  workdir_parent:

View file

@ -1,56 +0,0 @@
#!/usr/bin/env bash
# Apply branch protection rules to Furtka's main branch on Forgejo.
# Idempotent — re-runs safely (creates on first call, patches thereafter).
#
# Requires: jq, curl, and a Forgejo personal access token with repo write
# scope. Token is read from $FORGEJO_TOKEN, or extracted from the local
# git remote URL as a fallback.
#
# Usage:
#   FORGEJO_TOKEN=... ./ops/forgejo/apply-branch-protection.sh
set -euo pipefail

HOST="${FORGEJO_HOST:-forgejo.sourcegate.online}"
REPO="${FORGEJO_REPO:-daniel/furtka}"
BRANCH="${FORGEJO_BRANCH:-main}"

# Fallback: pull the token out of an https://user:token@host remote URL.
if [ -z "${FORGEJO_TOKEN:-}" ]; then
  FORGEJO_TOKEN=$(git config --get remote.origin.url \
    | sed -nE 's|https://[^:]+:([^@]+)@.*|\1|p')
fi
if [ -z "${FORGEJO_TOKEN:-}" ]; then
  echo "error: set FORGEJO_TOKEN or configure a token in remote.origin.url" >&2
  exit 1
fi

script_dir=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)
payload="$script_dir/branch-protection.json"
api_base="https://${HOST}/api/v1/repos/${REPO}/branch_protections"

# Authenticated JSON call against the Forgejo API; fails loudly on non-2xx.
api() {
  curl --silent --show-error --fail-with-body \
    --header "Authorization: token ${FORGEJO_TOKEN}" \
    --header "Content-Type: application/json" \
    "$@"
}

# Probe whether a protection rule already exists for this branch.
status=$(curl --silent --output /dev/null --write-out '%{http_code}' \
  --header "Authorization: token ${FORGEJO_TOKEN}" \
  "${api_base}/${BRANCH}")

if [ "$status" = "200" ]; then
  echo "Updating existing protection on ${BRANCH}"
  api --request PATCH "${api_base}/${BRANCH}" --data @"$payload" | jq .
elif [ "$status" = "404" ]; then
  echo "Creating protection on ${BRANCH}"
  # POST needs the branch name inside the body, not in the URL.
  body=$(jq --arg b "$BRANCH" '. + {branch_name: $b}' "$payload")
  api --request POST "${api_base}" --data "$body" | jq .
else
  echo "unexpected HTTP ${status} from ${api_base}/${BRANCH}" >&2
  exit 1
fi

View file

@ -1,15 +0,0 @@
{
"enable_push": true,
"enable_push_whitelist": true,
"push_whitelist_usernames": ["daniel"],
"enable_status_check": true,
"status_check_contexts": [
"CI / lint*",
"CI / test*",
"CI / validate-json*"
],
"required_approvals": 0,
"block_on_rejected_reviews": false,
"block_on_outdated_branch": false,
"require_signed_commits": false
}

View file

@ -1,45 +0,0 @@
# furtka.org — static site served straight from the deploy webroot.
server {
    listen 80;
    listen [::]:80;
    server_name furtka.org www.furtka.org;
    root /var/www/furtka.org;
    index index.html;
    charset utf-8;
    # Compress text-ish responses; fonts/images below 1 KiB aren't worth it.
    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_comp_level 6;
    gzip_types
        text/css
        text/plain
        text/xml
        application/javascript
        application/json
        application/xml
        application/rss+xml
        application/atom+xml
        image/svg+xml
        font/woff
        font/woff2;
    # Pretty URLs: /about resolves to /about.html when no file/dir matches.
    location / {
        try_files $uri $uri/ $uri.html =404;
    }
    # Exact-match wins over the regex block below, so the favicon keeps its
    # own (shorter) expiry and stays out of the access log.
    location = /favicon.svg {
        access_log off;
        log_not_found off;
        expires 7d;
    }
    # Long-lived static assets.
    # NOTE(review): `immutable` assumes asset URLs change on redeploy
    # (fingerprinted filenames) — confirm the site build guarantees that.
    location ~* \.(css|js|svg|woff2?|png|jpg|jpeg|webp|avif)$ {
        access_log off;
        expires 30d;
        add_header Cache-Control "public, immutable";
    }
    error_page 404 /404.html;
}

View file

@ -1,27 +0,0 @@
#!/usr/bin/env bash
# One-time setup on forge-runner-01 for furtka.org.
# Idempotent — safe to re-run.
#
# Usage (on the VM, with sudo):
#   sudo ops/nginx/setup-vm.sh
set -euo pipefail

# When invoked through sudo, hand directory ownership to the invoking user.
OWNER="${SUDO_USER:-daniel}"
WEBROOT="/var/www/furtka.org"
SRCROOT="/srv/furtka-site"
SITE_CONF="/etc/nginx/sites-available/furtka.org"
SITE_LINK="/etc/nginx/sites-enabled/furtka.org"

# Webroot + source checkout dir, owned by $OWNER so deploy jobs can write.
for dir in "$WEBROOT" "$SRCROOT"; do
  install -d -o "$OWNER" -g "$OWNER" -m 0755 "$dir"
done

# Install the vhost (relative to this script) and enable it.
cp "$(dirname "$0")/furtka.org.conf" "$SITE_CONF"
ln -sfn "$SITE_CONF" "$SITE_LINK"

# Drop the Ubuntu default site so it doesn't shadow us on :80.
rm -f /etc/nginx/sites-enabled/default

# Validate the config before reloading — a broken vhost must not take
# the running server down.
nginx -t
systemctl reload nginx
echo "OK: furtka.org ready at $WEBROOT (owner $OWNER)"

View file

@ -1,6 +1,6 @@
[project] [project]
name = "furtka" name = "homebase"
version = "26.15-alpha" version = "26.0-alpha"
description = "Open-source home server OS — simple enough for everyone." description = "Open-source home server OS — simple enough for everyone."
requires-python = ">=3.11" requires-python = ">=3.11"
readme = "README.md" readme = "README.md"
@ -37,10 +37,7 @@ select = [
[tool.pytest.ini_options] [tool.pytest.ini_options]
testpaths = ["tests"] testpaths = ["tests"]
pythonpath = ["webinstaller", "."] pythonpath = ["webinstaller"]
[project.scripts]
furtka = "furtka.cli:main"
[tool.setuptools] [tool.setuptools]
packages = ["furtka"] py-modules = []

View file

@ -1,52 +0,0 @@
#!/usr/bin/env bash
# Build a Furtka release tarball + sha256 sidecar + release.json metadata.
#
# Usage: ./scripts/build-release-tarball.sh <version>
#
# Produces (in ./dist/):
#   furtka-<version>.tar.gz        contents extract to /opt/furtka/versions/<version>/
#   furtka-<version>.tar.gz.sha256 single-line sha256 (<hash> <name>)
#   release.json                   {"version","sha256","size","created_at"}
#
# The tarball shape matches what iso/build.sh ships in the live ISO: a
# VERSION file at the root, plus furtka/ and apps/ trees. Self-update on a
# running box downloads this tarball, verifies the sha256, stages to
# /opt/furtka/versions/<version>/, and flips /opt/furtka/current to it.
set -euo pipefail

VERSION="${1:?usage: $0 <version>}"
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
DIST_DIR="$REPO_ROOT/dist"

# Stage into a throwaway dir that is removed on every exit path.
STAGE="$(mktemp -d)"
trap 'rm -rf "$STAGE"' EXIT

# assets/ ships at the tarball root — Caddy + systemd + updater resolve
# everything from /opt/furtka/current/assets/, never from inside the package.
for tree in furtka apps assets; do
  cp -a "$REPO_ROOT/$tree" "$STAGE/"
done
find "$STAGE" -type d -name __pycache__ -exec rm -rf {} +
echo "$VERSION" > "$STAGE/VERSION"

mkdir -p "$DIST_DIR"
TARBALL="$DIST_DIR/furtka-$VERSION.tar.gz"
tar -czf "$TARBALL" -C "$STAGE" .

# Sidecar hash + machine-readable metadata for the self-updater.
SHA=$(sha256sum "$TARBALL" | awk '{print $1}')
SIZE=$(stat -c%s "$TARBALL")
printf '%s %s\n' "$SHA" "$(basename "$TARBALL")" > "$TARBALL.sha256"
cat > "$DIST_DIR/release.json" <<EOF
{
"version": "$VERSION",
"sha256": "$SHA",
"size": $SIZE,
"created_at": "$(date -Iseconds)"
}
EOF

echo "Built $TARBALL"
echo " sha256: $SHA"
echo " size: $SIZE bytes"

View file

@ -1,118 +0,0 @@
#!/usr/bin/env bash
# Publish a Furtka release to the Forgejo releases page.
#
# Usage: ./scripts/publish-release.sh <version>
#
# Preconditions:
#   - $FORGEJO_TOKEN set (PAT with write:repository)
#   - dist/furtka-<version>.tar.gz + .sha256 + release.json already built
#
# Behaviour:
#   1. Read the [<version>] section from CHANGELOG.md for the release body.
#   2. Create a release on Forgejo (or fail if one already exists for the tag).
#   3. Upload the three assets sequentially (Forgejo's release API has been
#      observed to choke on parallel uploads).
set -euo pipefail

VERSION="${1:?usage: $0 <version>}"
: "${FORGEJO_TOKEN:?FORGEJO_TOKEN must be set}"
HOST="${FORGEJO_HOST:-forgejo.sourcegate.online}"
REPO="${FORGEJO_REPO:-daniel/furtka}"
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
DIST_DIR="$REPO_ROOT/dist"
TARBALL="$DIST_DIR/furtka-$VERSION.tar.gz"
SHA_FILE="$TARBALL.sha256"
RELEASE_JSON="$DIST_DIR/release.json"

# Fail fast if any expected artifact is missing.
for f in "$TARBALL" "$SHA_FILE" "$RELEASE_JSON"; do
  [ -f "$f" ] || { echo "missing: $f"; exit 1; }
done

# Extract the changelog section for this version. Matches `## [<version>]`
# up to (but not including) the next `## [` line.
# NOTE(review): the heading match is a literal substring test via index() —
# fine for the current version scheme, but worth confirming it can't
# collide if version strings ever become prefixes of each other.
BODY="$(awk -v v="$VERSION" '
BEGIN { inside=0 }
/^## \[/ {
  if (inside) exit
  if (index($0, "[" v "]") > 0) { inside=1; next }
}
inside { print }
' "$REPO_ROOT/CHANGELOG.md")"
if [ -z "$BODY" ]; then
  BODY="Release $VERSION"
fi

# Pre-release flag follows the -alpha/-beta/-rc suffix convention.
PRERELEASE=false
if [[ "$VERSION" == *-alpha* || "$VERSION" == *-beta* || "$VERSION" == *-rc* ]]; then
  PRERELEASE=true
fi

# Authenticated Forgejo API call; fails loudly on non-2xx.
api() {
  curl --silent --show-error --fail-with-body \
    --header "Authorization: token $FORGEJO_TOKEN" \
    "$@"
}
base="https://$HOST/api/v1/repos/$REPO"

# 1. Create the release. Python for JSON assembly so we don't depend on jq
#    on the runner — the previous `apt-get install -y jq` step in release.yml
#    hung for 15+ minutes on a slow mirror and stalled the whole publish.
#    Values ride environment variables so no shell-quoting of $BODY is needed.
release_body_json="$(
VERSION="$VERSION" BODY="$BODY" PRE="$PRERELEASE" python3 -c '
import json, os
print(json.dumps({
    "tag_name": os.environ["VERSION"],
    "name": os.environ["VERSION"],
    "body": os.environ["BODY"],
    "prerelease": os.environ["PRE"] == "true",
}))
'
)"
echo "==> Creating release $VERSION"
release_response="$(api --request POST "$base/releases" \
  --header "Content-Type: application/json" \
  --data "$release_body_json")"
# Parse the numeric release id out of the creation response; empty/null
# means the create failed in a way curl didn't flag.
release_id="$(echo "$release_response" | python3 -c 'import json, sys; print(json.load(sys.stdin).get("id", ""))')"
if [ -z "$release_id" ] || [ "$release_id" = "null" ]; then
  echo "error: couldn't parse release id from response:"
  echo "$release_response"
  exit 1
fi
echo " release id: $release_id"

# 2. Upload assets — one at a time.
upload_asset() {
  local path="$1"
  local name
  name="$(basename "$path")"
  echo "==> Uploading $name"
  api --request POST "$base/releases/$release_id/assets?name=$name" \
    --form "attachment=@$path" > /dev/null
}
upload_asset "$TARBALL"
upload_asset "$SHA_FILE"
upload_asset "$RELEASE_JSON"

# Optional: attach the live-installer ISO when dist/furtka-<version>.iso
# exists. Release workflows that want this build the ISO via iso/build.sh
# and move the output here before calling publish-release. Local runs
# that skip the ISO step still publish the core release successfully.
#
# Soft-fail: the ISO is ~1 GB and Forgejo's reverse proxy has returned
# 504 on the upload even when the write eventually succeeds. The core
# tarball (which boxes need for self-update) is already uploaded above,
# so don't let an ISO transport hiccup fail the whole release.
ISO="$DIST_DIR/furtka-$VERSION.iso"
if [ -f "$ISO" ]; then
  if ! upload_asset "$ISO"; then
    echo "warning: ISO upload failed — release published without ISO asset" >&2
  fi
fi
echo "Release $VERSION published: https://$HOST/$REPO/releases/tag/$VERSION"

View file

@ -1,238 +0,0 @@
#!/usr/bin/env bash
# Smoke-test a freshly built Furtka live ISO by booting it in a VM on the
# Proxmox test host (defaults to $PVE_TEST_HOST) and checking that the
# webinstaller answers HTTP 200 on :5000.
#
# Usage: ./scripts/smoke-vm.sh <iso-path>
#
# Required env:
#   PVE_TEST_HOST          IP/hostname of the test node (e.g. 192.168.178.165)
#   PVE_TEST_TOKEN         "user@realm!tokenid=secret" single string
#
# Optional env:
#   PVE_TEST_NODE          PVE node name; auto-detected from /nodes if empty
#   PVE_TEST_ISO_STORAGE   default "local"
#   PVE_TEST_DISK_STORAGE  default "local-lvm"
#   PVE_TEST_BRIDGE        default "vmbr0"
#   PVE_TEST_VMID_MIN      default 9000
#   PVE_TEST_VMID_MAX      default 9099
#   PVE_TEST_KEEP          how many past smoke VMs to retain (default 5)
#   PVE_TEST_BOOT_TIMEOUT  seconds to wait for :5000 (default 180)
#   PVE_TEST_VM_MEMORY     MiB of RAM for the smoke VM (default 8192). Bumped
#                          from 4096 on 2026-04-18 — mkinitcpio on 4 GB VMs
#                          OOM-ed the pollux host mid-install, pulling pveproxy
#                          + the runner connection down with it.
#   PVE_TEST_VM_CORES      vCPU count for the smoke VM (default 2)
#   SMOKE_SHA              commit SHA used in name/tag/MAC; defaults to git HEAD
#
# Exits 0 iff the ISO booted and :5000 returned 200. Prunes old VMs + ISOs
# after the test regardless of outcome so a failed build's VM stays behind
# for post-mortem (at the cost of the run before it).
set -euo pipefail

ISO_PATH="${1:?usage: $0 <iso-path>}"
[[ -f "$ISO_PATH" ]] || { echo "iso not found: $ISO_PATH" >&2; exit 1; }
: "${PVE_TEST_HOST:?PVE_TEST_HOST must be set}"
: "${PVE_TEST_TOKEN:?PVE_TEST_TOKEN must be set}"

# Optional knobs with documented defaults (see header above).
ISO_STORAGE="${PVE_TEST_ISO_STORAGE:-local}"
DISK_STORAGE="${PVE_TEST_DISK_STORAGE:-local-lvm}"
BRIDGE="${PVE_TEST_BRIDGE:-vmbr0}"
VMID_MIN="${PVE_TEST_VMID_MIN:-9000}"
VMID_MAX="${PVE_TEST_VMID_MAX:-9099}"
KEEP="${PVE_TEST_KEEP:-5}"
BOOT_TIMEOUT="${PVE_TEST_BOOT_TIMEOUT:-180}"
VM_MEMORY="${PVE_TEST_VM_MEMORY:-8192}"
VM_CORES="${PVE_TEST_VM_CORES:-2}"
# Commit SHA drives the VM name/tag and the derived MAC; the fallback
# placeholder keeps ${SHA:0:12} well-defined outside a git checkout.
SHA="${SMOKE_SHA:-$(git rev-parse HEAD 2>/dev/null || echo unknownunknown)}"
SHORT_SHA="${SHA:0:12}"
# Proxmox REST endpoint (JSON flavour).
API="https://${PVE_TEST_HOST}:8006/api2/json"
api() {
  # Wrapper so that on non-2xx we print the PVE response body to stderr
  # before bubbling the failure — otherwise `--fail-with-body` output
  # gets swallowed by callers that pipe to /dev/null, and you're left
  # staring at "curl: (22)" with no idea which permission is missing.
  #
  # The `|| rc=$?` is load-bearing: this script runs under `set -e`, so
  # a bare `body=$(curl …)` that fails would abort the whole script
  # before the diagnostics below ever ran whenever api is called outside
  # an if/||/&& context (the assignment's exit status is the command
  # substitution's, and errexit fires on it immediately).
  local body rc=0
  body=$(curl --silent --show-error --fail-with-body -k \
    --header "Authorization: PVEAPIToken=${PVE_TEST_TOKEN}" \
    "$@" 2>&1) || rc=$?
  if [[ $rc -ne 0 ]]; then
    echo "!! PVE API call failed (rc=$rc)" >&2
    echo "!! request: $*" >&2
    [[ -n "$body" ]] && echo "!! response: $body" >&2
    return $rc
  fi
  printf '%s' "$body"
}
# PVE wraps every payload as {"data": <payload>}; print just the .data part.
jget() {
  python3 -c 'import json, sys
print(json.load(sys.stdin)["data"])'
}
# Auto-detect node name if not given: first entry from /nodes.
NODE="${PVE_TEST_NODE:-}"
if [[ -z "$NODE" ]]; then
  NODE="$(api "$API/nodes" | python3 -c '
import json, sys
nodes = json.load(sys.stdin)["data"]
if not nodes:
    sys.exit("no nodes returned from PVE")
print(nodes[0]["node"])
')"
fi
echo "==> node=$NODE sha=$SHORT_SHA iso=$(basename "$ISO_PATH")"

# ISO is named after the commit so re-runs of the same SHA can find it.
ISO_NAME="furtka-${SHORT_SHA}.iso"
VOLID="${ISO_STORAGE}:iso/${ISO_NAME}"

# --- Step 1: upload ISO (or reuse if same SHA already on PVE) ---------------
# For a given commit SHA the ISO bytes are reproducible, so if furtka-<sha>.iso
# is already in PVE storage from a prior smoke run we reuse it and skip the
# upload. Avoids DELETE-permission friction and shaves ~2 min off re-runs.
if api "$API/nodes/$NODE/storage/$ISO_STORAGE/content/$VOLID" \
    --output /dev/null 2>/dev/null; then
  echo "==> reusing existing ISO $VOLID"
else
  echo "==> uploading ISO as $ISO_NAME"
  # Multipart upload; the second `filename=` names the volume PVE-side.
  api --request POST "$API/nodes/$NODE/storage/$ISO_STORAGE/upload" \
    --form "content=iso" \
    --form "filename=@${ISO_PATH};filename=${ISO_NAME}" \
    > /dev/null
fi
# --- Step 2: pick a free VMID in the reserved range ------------------------
# List VMs on the node, filter by range, pick the lowest integer not in use.
USED="$(api "$API/nodes/$NODE/qemu" | python3 -c '
import json, sys
data = json.load(sys.stdin)["data"]
print(" ".join(str(v["vmid"]) for v in data))
')"
VMID=""
for ((id = VMID_MIN; id <= VMID_MAX; id++)); do
  # Whitespace-padded glob acts as a membership test against USED.
  if ! [[ " $USED " == *" $id "* ]]; then
    VMID="$id"
    break
  fi
done
[[ -n "$VMID" ]] || { echo "no free VMID in ${VMID_MIN}-${VMID_MAX}" >&2; exit 1; }

# Derive a stable MAC from the SHA. BC:24:11 is Proxmox's assigned OUI.
MAC_TAIL="$(echo "$SHORT_SHA" | tr 'a-z' 'A-Z' | cut -c1-6)"
MAC="BC:24:11:${MAC_TAIL:0:2}:${MAC_TAIL:2:2}:${MAC_TAIL:4:2}"

# UEFI (OVMF) q35 guest, CD-ROM first in boot order so the live ISO wins
# over the blank 20 G scsi0 disk the installer will target.
echo "==> creating VM $VMID name=furtka-smoke-${SHORT_SHA} mac=$MAC"
api --request POST "$API/nodes/$NODE/qemu" \
  --data-urlencode "vmid=$VMID" \
  --data-urlencode "name=furtka-smoke-${SHORT_SHA}" \
  --data-urlencode "tags=furtka;smoke;sha-${SHORT_SHA}" \
  --data-urlencode "cores=${VM_CORES}" \
  --data-urlencode "memory=${VM_MEMORY}" \
  --data-urlencode "bios=ovmf" \
  --data-urlencode "machine=q35" \
  --data-urlencode "ostype=l26" \
  --data-urlencode "scsihw=virtio-scsi-single" \
  --data-urlencode "efidisk0=${DISK_STORAGE}:1,efitype=4m,pre-enrolled-keys=0" \
  --data-urlencode "scsi0=${DISK_STORAGE}:20,discard=on,ssd=1" \
  --data-urlencode "ide2=${VOLID},media=cdrom" \
  --data-urlencode "boot=order=ide2;scsi0" \
  --data-urlencode "net0=virtio=${MAC},bridge=${BRIDGE},firewall=0" \
  > /dev/null
echo "==> starting VM $VMID"
api --request POST "$API/nodes/$NODE/qemu/$VMID/status/start" > /dev/null

# --- Step 3: discover the VM's IP by MAC -----------------------------------
# The live ISO has no qemu-guest-agent, so PVE can't tell us the IP.
# We scan the LAN from the runner and match on our derived MAC.
MAC_LOWER="$(echo "$MAC" | tr 'A-Z' 'a-z')"
IP=""
deadline=$((SECONDS + 150))
while (( SECONDS < deadline )); do
  # Capture-then-parse instead of piping directly into awk. `awk '... exit'`
  # exits on first match, which SIGPIPEs the upstream arp-scan (exit 141).
  # With `set -o pipefail` active that kills the whole script — exactly what
  # happened the first time host-networking gave arp-scan real matches.
  SCAN=""
  if command -v arp-scan >/dev/null 2>&1; then
    SCAN="$(sudo arp-scan --localnet --quiet --ignoredups 2>/dev/null || true)"
    IP="$(awk -v m="$MAC_LOWER" 'tolower($2) == m { print $1; exit }' <<<"$SCAN")"
  fi
  # Fallback: ping-sweep with nmap to populate the ARP table, then read
  # the kernel neighbour cache.
  # NOTE(review): the /24 subnet is hard-coded — assumes the runner sits
  # on 192.168.178.0/24; confirm before reusing on another network.
  if [[ -z "$IP" ]] && command -v nmap >/dev/null 2>&1; then
    sudo nmap -sn -T4 192.168.178.0/24 >/dev/null 2>&1 || true
    NEIGH="$(ip neigh show)"
    IP="$(awk -v m="$MAC_LOWER" 'tolower($5) == m && $1 ~ /^[0-9]/ { print $1; exit }' <<<"$NEIGH")"
  fi
  [[ -n "$IP" ]] && break
  sleep 5
done
if [[ -z "$IP" ]]; then
  echo "!! never saw $MAC on the LAN within 150s" >&2
  SMOKE_RC=1
else
  echo "==> VM $VMID is at $IP (mac $MAC)"
fi
# --- Step 4: smoke the webinstaller ----------------------------------------
# SMOKE_RC may already be 1 from the failed IP discovery above; only poll
# when we actually have an address.
SMOKE_RC="${SMOKE_RC:-0}"
if [[ "$SMOKE_RC" -eq 0 ]]; then
  echo "==> polling http://${IP}:5000 (timeout ${BOOT_TIMEOUT}s)"
  end=$((SECONDS + BOOT_TIMEOUT))
  while (( SECONDS < end )); do
    if curl --silent --fail --max-time 5 --output /dev/null "http://${IP}:5000/"; then
      echo "==> :5000 answered 200 — smoke passed"
      SMOKE_RC=0
      break
    fi
    # Pessimistically mark failure; overwritten with 0 if a later poll hits.
    SMOKE_RC=1
    sleep 5
  done
  if [[ "$SMOKE_RC" -ne 0 ]]; then
    echo "!! :5000 never returned 200 on ${IP}" >&2
  fi
fi
# --- Step 5: prune old smoke VMs + ISOs ------------------------------------
echo "==> pruning smoke VMs, keeping last $KEEP"
# List VMs in the reserved range sorted by vmid desc; drop the first KEEP.
TO_DROP="$(api "$API/nodes/$NODE/qemu" | python3 -c "
import json, sys
lo, hi, keep = ${VMID_MIN}, ${VMID_MAX}, ${KEEP}
vms = [v for v in json.load(sys.stdin)['data']
       if lo <= int(v['vmid']) <= hi]
vms.sort(key=lambda v: int(v['vmid']), reverse=True)
for v in vms[keep:]:
    print(v['vmid'])
")"
for old in $TO_DROP; do
  echo " dropping VM $old"
  # Find the ISO the VM was booted from so we can delete it after.
  OLD_ISO="$(api "$API/nodes/$NODE/qemu/$old/config" | python3 -c '
import json, sys, re
cfg = json.load(sys.stdin)["data"]
for k in ("ide0","ide1","ide2","ide3","sata0","sata1","sata2","sata3"):
    v = cfg.get(k,"")
    m = re.match(r"([^,]+),.*media=cdrom", v)
    if m and m.group(1).endswith(".iso"):
        print(m.group(1)); break
' || true)"
  # Stop (ignore errors if already stopped), then purge.
  api --request POST "$API/nodes/$NODE/qemu/$old/status/stop" \
    --output /dev/null 2>/dev/null || true
  # /qemu/<id> DELETE is async; the call returns a UPID but for our purposes
  # "fire and forget" is fine — next prune will retry if it didn't land.
  api --request DELETE "$API/nodes/$NODE/qemu/$old?purge=1&destroy-unreferenced-disks=1" \
    --output /dev/null || echo " (delete of $old failed; skipping)"
  # Never delete the ISO the current run booted from.
  if [[ -n "$OLD_ISO" && "$OLD_ISO" != "$VOLID" ]]; then
    echo " dropping ISO $OLD_ISO"
    api --request DELETE "$API/nodes/$NODE/storage/$ISO_STORAGE/content/$OLD_ISO" \
      --output /dev/null 2>/dev/null || true
  fi
done

exit "$SMOKE_RC"

File diff suppressed because it is too large Load diff

View file

@ -1,197 +0,0 @@
from app import (
build_archinstall_config,
build_archinstall_creds,
validate_step1,
)
def test_validate_step1_accepts_good_input():
    """A fully valid step-1 form yields no errors and normalized values."""
    form = {
        "hostname": "furtka",
        "username": "daniel",
        "password": "topsecretpw",
        "password2": "topsecretpw",
        "language": "de",
    }
    errors, values = validate_step1(form)
    assert errors == []
    # password2 is confirmation-only and must not survive into values.
    expected = dict(form)
    del expected["password2"]
    assert values == expected
def test_validate_step1_collects_all_errors():
    """Validation reports every bad field instead of stopping at the first."""
    bad_form = {
        "hostname": "BAD!",
        "username": "1bad",
        "password": "short",
        "password2": "mismatch",
        "language": "xx",
    }
    errors, _ = validate_step1(bad_form)
    # One error per invalid field above.
    assert len(errors) == 5
def test_build_archinstall_config_uses_selected_locale(monkeypatch):
    """The chosen language drives locale_config; disk config is delegated."""
    # build_disk_config imports archinstall lazily; archinstall isn't
    # installed in CI (only runs on the live ISO), so stub it out.
    import app as app_module

    monkeypatch.setattr(app_module, "build_disk_config", lambda d: {"stubbed_device": d})
    form = {
        "hostname": "h",
        "username": "u",
        "password": "pw12345678",
        "language": "pl",
        "boot_drive": "/dev/sda",
    }
    cfg = build_archinstall_config(form)
    assert cfg["hostname"] == "h"
    assert cfg["disk_config"] == {"stubbed_device": "/dev/sda"}
    assert cfg["locale_config"]["locale"] == "pl_PL.UTF-8"
    # Users moved out of config into creds once we adopted archinstall 4.x's
    # `!password` sentinel; config only carries a gpasswd in custom_commands
    # so the user lands in the docker group after docker is pacstrapped.
    assert "users" not in cfg
    assert cfg["custom_commands"][0] == "gpasswd -a u docker"
def test_build_archinstall_config_includes_post_install_bootstrap(monkeypatch, tmp_path):
    """The generated config bootstraps the full landing-page stack."""
    # The installed system should come up with a Furtka landing page at
    # http://<hostname>.local. That means caddy + avahi pacstrapped, the
    # matching services enabled, a Caddyfile written into the target rootfs,
    # nss-mdns spliced into nsswitch.conf, and the resource-manager tarball
    # extracted into the versioned /opt/furtka/current layout.
    import app as app_module

    # Fake payload so _resource_manager_commands emits its full cmd tree.
    fake_payload = tmp_path / "payload.tar.gz"
    fake_payload.write_bytes(b"\x1f\x8b\x08\x00fake")
    monkeypatch.setattr(app_module, "RESOURCE_MANAGER_PAYLOAD", fake_payload)
    monkeypatch.setattr(app_module, "build_disk_config", lambda d: {"stubbed_device": d})
    cfg = build_archinstall_config(
        {
            "hostname": "heimserver",
            "username": "u",
            "password": "pw12345678",
            "language": "en",
            "boot_drive": "/dev/sda",
        }
    )
    for pkg in ("caddy", "avahi", "nss-mdns"):
        assert pkg in cfg["packages"]
    # Packaged units go in `services` (enabled before custom_commands runs);
    # our own units don't exist at that point, so they must be enabled from
    # within custom_commands after the unit files land on disk.
    for svc in ("caddy", "avahi-daemon"):
        assert svc in cfg["services"]
    assert "furtka-welcome" not in cfg["services"]
    assert "furtka-status.timer" not in cfg["services"]
    joined = "\n".join(cfg["custom_commands"])
    # Every furtka-* unit is systemctl-linked from the shipped asset tree and
    # then enabled — no hand-written files under /etc/systemd/system/.
    assert "systemctl link /opt/furtka/current/assets/systemd/" in joined
    assert "systemctl enable furtka-api.service" in joined
    for path in (
        "/etc/caddy/Caddyfile",
        "/var/lib/furtka/status.json",
        "/var/lib/furtka/furtka.json",
    ):
        assert path in joined, f"expected {path} to be written by custom_commands"
    # /srv/furtka/www/ is retired — no writes should target it anymore.
    assert "/srv/furtka/www" not in joined
    assert "mdns_minimal" in joined
    assert "nsswitch.conf" in joined
    # Hostname is injected via /var/lib/furtka/furtka.json, not via sed.
    assert "__HOSTNAME__" not in joined
def test_resource_manager_payload_landed_when_present(monkeypatch, tmp_path):
    """With a staged payload, install commands deploy the versioned layout."""
    # When iso/build.sh has staged the resource-manager tarball, the
    # post-install commands should untar it into /opt/furtka/versions/<ver>/,
    # flip the /opt/furtka/current symlink, drop the `furtka` wrapper, and
    # systemctl-link every Furtka unit file.
    import app as app_module

    fake_payload = tmp_path / "furtka-resource-manager.tar.gz"
    fake_payload.write_bytes(b"\x1f\x8b\x08\x00fake-tarball-bytes")
    monkeypatch.setattr(app_module, "RESOURCE_MANAGER_PAYLOAD", fake_payload)
    monkeypatch.setattr(app_module, "build_disk_config", lambda d: {"stubbed_device": d})
    cfg = build_archinstall_config(
        {
            "hostname": "heimserver",
            "username": "u",
            "password": "pw12345678",
            "language": "en",
            "boot_drive": "/dev/sda",
        }
    )
    joined = "\n".join(cfg["custom_commands"])
    # Tarball lands in the versioned slot, not flat /opt/furtka/.
    assert 'tar -xzf - -C "$staging"' in joined
    assert "/opt/furtka/versions/$ver" in joined
    assert 'ln -sfn "/opt/furtka/versions/$ver" /opt/furtka/current' in joined
    # CLI wrapper lands and references /opt/furtka/current. The wrapper body
    # rides the base64 payload, so assert on the constant directly.
    assert "/usr/local/bin/furtka" in joined
    assert "PYTHONPATH=/opt/furtka/current" in app_module._FURTKA_WRAPPER_SH
    # Every unit is linked + enabled.
    for unit in app_module._FURTKA_UNITS:
        assert f"/opt/furtka/current/assets/systemd/{unit}" in joined
        assert unit in joined
    # python is pacstrapped so the wrapper has an interpreter.
    assert "python" in cfg["packages"]
def test_resource_manager_absent_without_payload(monkeypatch, tmp_path):
    """Without a payload file, no tarball/unit commands are emitted."""
    # Dev box / CI without an ISO build: payload doesn't exist. No tarball
    # extract, no unit link, no enable — and no stale references to furtka-*
    # units in custom_commands.
    import app as app_module

    monkeypatch.setattr(app_module, "RESOURCE_MANAGER_PAYLOAD", tmp_path / "does-not-exist.tar.gz")
    monkeypatch.setattr(app_module, "build_disk_config", lambda d: {"stubbed_device": d})
    cfg = build_archinstall_config(
        {
            "hostname": "heimserver",
            "username": "u",
            "password": "pw12345678",
            "language": "en",
            "boot_drive": "/dev/sda",
        }
    )
    joined = "\n".join(cfg["custom_commands"])
    assert "tar -xzf" not in joined
    assert "systemctl link" not in joined
    # The base system bootstrap (caddy, status.json placeholder, furtka.json)
    # is unaffected.
    assert "/etc/caddy/Caddyfile" in joined
    assert "/var/lib/furtka/furtka.json" in joined
def test_build_archinstall_creds_uses_archinstall_sentinel_keys():
    """Credentials use archinstall's `!`-prefixed sentinel keys for secrets."""
    creds = build_archinstall_creds({"username": "u", "password": "pw12345678"})
    assert creds["!root-password"] == "pw12345678"
    expected_user = {
        "username": "u",
        "!password": "pw12345678",
        "sudo": True,
        "groups": [],
    }
    assert creds["users"] == [expected_user]

View file

@ -1,230 +0,0 @@
import json
from datetime import UTC, datetime, timedelta
import pytest
from furtka import auth
@pytest.fixture
def tmp_users_file(tmp_path, monkeypatch):
    """Point auth at a per-test users.json and reset shared module state.

    Returns the (not-yet-existing) Path to the users file.
    """
    path = tmp_path / "users.json"
    monkeypatch.setenv("FURTKA_USERS_FILE", str(path))
    # Sessions and lockout state are module-level; wipe between tests so
    # one doesn't leak a valid token (or a stale failure counter) into
    # the next.
    auth.SESSIONS.clear()
    auth.LOCKOUT.clear_all()
    return path
def test_hash_password_roundtrip():
    """verify_password accepts the original secret and rejects others."""
    digest = auth.hash_password("hunter2")
    assert digest != "hunter2"  # Never stored as plain text.
    assert auth.verify_password("hunter2", digest) is True
    assert auth.verify_password("hunter3", digest) is False
def test_hash_password_is_salted():
    """Hashing the same password twice must not repeat (per-hash salt)."""
    first = auth.hash_password("same")
    second = auth.hash_password("same")
    assert first != second
    # Both hashes still verify against the original password.
    assert auth.verify_password("same", first)
    assert auth.verify_password("same", second)
def test_load_users_returns_empty_when_missing(tmp_users_file):
    """A missing users file reads as an empty mapping, not an error."""
    assert not tmp_users_file.exists()
    assert auth.load_users() == {}


def test_load_users_returns_empty_on_junk(tmp_users_file):
    """Unparseable JSON degrades to an empty mapping."""
    tmp_users_file.write_text("{not json")
    assert auth.load_users() == {}


def test_load_users_returns_empty_on_non_dict(tmp_users_file):
    """Valid JSON of the wrong shape (a list) also reads as empty."""
    tmp_users_file.write_text("[]")
    assert auth.load_users() == {}


def test_save_users_atomic_and_0600(tmp_users_file):
    """Saved users file is owner-only (0600) and round-trips its content.

    NOTE(review): atomicity itself isn't asserted here — only mode and
    content; the atomic-write behavior is implied by the auth implementation.
    """
    auth.save_users({"admin": {"hash": "x", "username": "daniel"}})
    assert tmp_users_file.exists()
    mode = tmp_users_file.stat().st_mode & 0o777
    assert mode == 0o600, f"expected 0o600, got {oct(mode)}"
    loaded = json.loads(tmp_users_file.read_text())
    assert loaded["admin"]["username"] == "daniel"
def test_setup_needed_true_on_missing_file(tmp_users_file):
    """First boot: no users file means setup is required."""
    assert auth.setup_needed() is True


def test_setup_needed_true_on_empty_dict(tmp_users_file):
    """An empty users mapping also counts as 'needs setup'."""
    tmp_users_file.write_text("{}")
    assert auth.setup_needed() is True


def test_setup_needed_false_when_admin_exists(tmp_users_file):
    """Once an admin is created, setup is done."""
    auth.create_admin("daniel", "secret-pw")
    assert auth.setup_needed() is False


def test_create_admin_overwrites_file(tmp_users_file):
    """create_admin replaces any existing admin record."""
    auth.create_admin("daniel", "secret-pw")
    auth.create_admin("robert", "new-pw")
    users = auth.load_users()
    assert users["admin"]["username"] == "robert"


def test_authenticate_happy(tmp_users_file):
    """Correct username + password authenticates."""
    auth.create_admin("daniel", "secret-pw")
    assert auth.authenticate("daniel", "secret-pw") is True


def test_authenticate_wrong_username(tmp_users_file):
    """A wrong username is rejected even with the right password."""
    auth.create_admin("daniel", "secret-pw")
    assert auth.authenticate("robert", "secret-pw") is False


def test_authenticate_wrong_password(tmp_users_file):
    """A wrong password is rejected."""
    auth.create_admin("daniel", "secret-pw")
    assert auth.authenticate("daniel", "wrong") is False


def test_authenticate_no_admin(tmp_users_file):
    """With no admin configured, every login attempt fails."""
    assert auth.authenticate("daniel", "anything") is False
# ---- Session store ---------------------------------------------------------


def test_session_create_and_lookup(tmp_users_file):
    """A freshly created session is retrievable by its token."""
    s = auth.SESSIONS.create("daniel")
    assert s.username == "daniel"
    assert s.token
    looked_up = auth.SESSIONS.lookup(s.token)
    assert looked_up is not None
    assert looked_up.username == "daniel"


def test_session_lookup_unknown_token(tmp_users_file):
    """Lookup of a token that was never issued yields None."""
    assert auth.SESSIONS.lookup("not-a-real-token") is None


def test_session_lookup_none_token(tmp_users_file):
    """None and empty-string tokens are rejected, not treated as keys."""
    assert auth.SESSIONS.lookup(None) is None
    assert auth.SESSIONS.lookup("") is None


def test_session_revoke(tmp_users_file):
    """Revoking a token invalidates subsequent lookups."""
    s = auth.SESSIONS.create("daniel")
    auth.SESSIONS.revoke(s.token)
    assert auth.SESSIONS.lookup(s.token) is None
def test_session_expires(tmp_users_file, monkeypatch):
    """A session past its TTL is treated as expired and dropped on lookup."""
    # Build a session store with a 0-second TTL so lookup immediately
    # treats new sessions as expired.
    store = auth.SessionStore(ttl_seconds=0)
    s = store.create("daniel")
    # Force the clock forward a hair so the > check fires.
    monkeypatch.setattr(
        auth,
        "datetime",
        _FakeDatetime(datetime.now(UTC) + timedelta(seconds=1)),
    )
    # The module-local datetime reference inside SessionStore.lookup
    # resolves at call time. Verify that an expired session is dropped.
    assert store.lookup(s.token) is None
class _FakeDatetime:
"""Tiny shim — only `.now(tz)` is used from SessionStore."""
def __init__(self, fixed_utc):
self._fixed = fixed_utc
def now(self, tz=None):
if tz is None:
return self._fixed.replace(tzinfo=None)
return self._fixed.astimezone(tz)
# ---- Login attempts / lockout ----------------------------------------------


def test_lockout_under_threshold_still_allowed(tmp_users_file):
    """Failures below max_failures neither lock nor impose a retry delay."""
    store = auth.LoginAttempts(max_failures=3, window_seconds=60, lockout_seconds=60)
    key = ("daniel", "10.0.0.1")
    for _ in range(2):
        store.register_failure(key)
    assert store.is_locked(key) is False
    assert store.retry_after_seconds(key) == 0


def test_lockout_triggers_at_threshold(tmp_users_file):
    """Reaching max_failures locks the key with a bounded retry delay."""
    store = auth.LoginAttempts(max_failures=3, window_seconds=60, lockout_seconds=60)
    key = ("daniel", "10.0.0.1")
    for _ in range(3):
        store.register_failure(key)
    assert store.is_locked(key) is True
    # Delay is positive but never exceeds lockout_seconds.
    assert store.retry_after_seconds(key) > 0
    assert store.retry_after_seconds(key) <= 60
def test_lockout_window_decay(tmp_users_file, monkeypatch):
    """Failures older than window_seconds are pruned, clearing the lock."""
    store = auth.LoginAttempts(max_failures=3, window_seconds=60, lockout_seconds=60)
    key = ("daniel", "10.0.0.1")
    for _ in range(3):
        store.register_failure(key)
    assert store.is_locked(key) is True
    # Jump 2 minutes ahead — all failures are older than the window
    # and should be pruned on the next check.
    monkeypatch.setattr(
        auth,
        "datetime",
        _FakeDatetime(datetime.now(UTC) + timedelta(seconds=121)),
    )
    assert store.is_locked(key) is False
    assert store.retry_after_seconds(key) == 0
def test_lockout_clear_resets(tmp_users_file):
    """clear() wipes the failure history for the given key."""
    attempts = auth.LoginAttempts(max_failures=2, window_seconds=60, lockout_seconds=60)
    key = ("daniel", "10.0.0.1")
    for _failure in range(2):
        attempts.register_failure(key)
    assert attempts.is_locked(key) is True
    attempts.clear(key)
    assert attempts.is_locked(key) is False
    assert attempts.retry_after_seconds(key) == 0
def test_lockout_keys_are_independent(tmp_users_file):
    """Lockout is scoped to a (user, ip) pair — other IPs/users stay open."""
    attempts = auth.LoginAttempts(max_failures=2, window_seconds=60, lockout_seconds=60)
    locked_key = ("daniel", "1.1.1.1")
    for _failure in range(2):
        attempts.register_failure(locked_key)
    assert attempts.is_locked(locked_key) is True
    assert attempts.is_locked(("daniel", "2.2.2.2")) is False
    assert attempts.is_locked(("robert", "1.1.1.1")) is False
def test_lockout_clear_all_wipes_every_key(tmp_users_file):
    """clear_all() unlocks every tracked (user, ip) pair at once."""
    attempts = auth.LoginAttempts(max_failures=2, window_seconds=60, lockout_seconds=60)
    keys = [("daniel", "1.1.1.1"), ("robert", "2.2.2.2")]
    for key in keys:
        attempts.register_failure(key)
        attempts.register_failure(key)
    assert all(attempts.is_locked(key) for key in keys)
    attempts.clear_all()
    assert not any(attempts.is_locked(key) for key in keys)

View file

@ -1,333 +0,0 @@
"""Tests for the apps-catalog sync flow.
Same shape as ``tests/test_updater.py``: fixture reloads the module with
env-overridden paths, fake tarballs land in tmp_path, Forgejo API is
stubbed via ``urllib.request.urlopen`` monkeypatching so nothing talks
to the network.
Asserts end-to-end atomicity: on any failure path — bad sha256, broken
tarball, invalid manifest — the live catalog dir is either left
untouched (if one existed) or absent (if it didn't).
"""
from __future__ import annotations
import io
import json
import tarfile
from pathlib import Path
import pytest
@pytest.fixture
def catalog(tmp_path, monkeypatch):
    """Reload ``furtka.catalog`` with every path knob redirected into tmp_path.

    ``furtka.paths`` is reloaded first so it re-reads the FURTKA_* env
    overrides (presumably at import time — matches the reload order here)
    before ``furtka.catalog`` is reloaded on top of it.
    """
    monkeypatch.setenv("FURTKA_CATALOG_DIR", str(tmp_path / "var_lib_furtka_catalog"))
    monkeypatch.setenv("FURTKA_CATALOG_STATE", str(tmp_path / "var_lib_furtka_catalog-state.json"))
    monkeypatch.setenv("FURTKA_CATALOG_LOCK", str(tmp_path / "catalog.lock"))
    monkeypatch.setenv("FURTKA_FORGEJO_HOST", "forgejo.test.local")
    monkeypatch.setenv("FURTKA_CATALOG_REPO", "daniel/furtka-apps")
    import importlib

    from furtka import catalog as c
    from furtka import paths as p

    importlib.reload(p)
    importlib.reload(c)
    return c
def _manifest(name: str = "fileshare") -> dict:
return {
"name": name,
"display_name": "Fileshare",
"version": "0.1.0",
"description": "Test fixture app",
"volumes": ["files"],
"ports": [445],
"icon": "icon.svg",
}
def _make_catalog_tarball(
path: Path,
version: str,
*,
apps: list[tuple[str, dict]] | None = None,
extra_entries: list[tuple[str, bytes]] | None = None,
) -> None:
"""Build a minimal valid catalog tarball.
`apps` is a list of (folder_name, manifest_dict). Each app folder gets
a `manifest.json` + a stub `docker-compose.yaml` + `icon.svg`.
`extra_entries` lets tests inject malformed content (path-traversal,
missing VERSION, ...) without rebuilding the helper.
"""
apps = apps if apps is not None else [("fileshare", _manifest())]
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w:gz") as tf:
entries: list[tuple[str, bytes]] = [("VERSION", f"{version}\n".encode())]
for folder, m in apps:
entries.append((f"apps/{folder}/manifest.json", json.dumps(m).encode()))
entries.append(
(f"apps/{folder}/docker-compose.yaml", b"services:\n app:\n image: scratch\n")
)
entries.append((f"apps/{folder}/icon.svg", b"<svg/>"))
if extra_entries:
entries.extend(extra_entries)
for name, data in entries:
info = tarfile.TarInfo(name=name)
info.size = len(data)
tf.addfile(info, io.BytesIO(data))
path.write_bytes(buf.getvalue())
def _stub_forgejo_release(
    monkeypatch,
    catalog,
    *,
    tag: str,
    tarball_url: str = "https://forgejo.test.local/t.tar.gz",
    sha_url: str = "https://forgejo.test.local/t.tar.gz.sha256",
    releases: list | None = None,
):
    """Patch ``_rc.forgejo_api`` so check_catalog sees a canned release list.

    When ``releases`` is not given, a single release for ``tag`` with a
    tarball + .sha256 asset pair is synthesised. Pass ``releases=[]`` to
    simulate a repo with no published releases.
    """
    if releases is None:
        releases = [
            {
                "tag_name": tag,
                "assets": [
                    {"name": f"furtka-apps-{tag}.tar.gz", "browser_download_url": tarball_url},
                    {
                        "name": f"furtka-apps-{tag}.tar.gz.sha256",
                        "browser_download_url": sha_url,
                    },
                ],
            }
        ]

    # Keeps the real call signature so call sites bind cleanly; every
    # argument is ignored and the canned list is returned as-is.
    def fake_api(host, repo, path, *, error_cls=RuntimeError):
        return releases

    from furtka import _release_common as _rc

    monkeypatch.setattr(_rc, "forgejo_api", fake_api)
def _stub_download(monkeypatch, catalog, mapping: dict[str, bytes]):
    """Patch ``_rc.download`` so sync_catalog pulls from an in-memory map."""
    from furtka import _release_common as _rc

    def fake_download(url, dest, *, error_cls=RuntimeError):
        payload = mapping.get(url)
        if payload is None:
            raise error_cls(f"test: no fake content for {url}")
        dest.parent.mkdir(parents=True, exist_ok=True)
        dest.write_bytes(payload)

    monkeypatch.setattr(_rc, "download", fake_download)
# --------------------------------------------------------------------------- #
# check_catalog
# --------------------------------------------------------------------------- #
def test_check_catalog_reports_update_when_versions_differ(catalog, monkeypatch, tmp_path):
    """Installed 26.5 vs released 26.6 → update offered with asset URLs."""
    # Pretend we already have catalog version 26.5 on disk; Forgejo reports 26.6.
    catalog.catalog_dir().mkdir(parents=True)
    (catalog.catalog_dir() / "VERSION").write_text("26.5\n")
    _stub_forgejo_release(monkeypatch, catalog, tag="26.6")
    result = catalog.check_catalog()
    assert (result.current, result.latest) == ("26.5", "26.6")
    assert result.update_available is True
    assert result.tarball_url.endswith(".tar.gz")
    assert result.sha256_url.endswith(".sha256")
def test_check_catalog_reports_up_to_date_when_same_version(catalog, monkeypatch):
    """Matching on-disk and released versions → no update offered."""
    live = catalog.catalog_dir()
    live.mkdir(parents=True)
    (live / "VERSION").write_text("26.5\n")
    _stub_forgejo_release(monkeypatch, catalog, tag="26.5")
    result = catalog.check_catalog()
    assert (result.current, result.latest) == ("26.5", "26.5")
    assert result.update_available is False
def test_check_catalog_treats_missing_current_as_installable(catalog, monkeypatch):
    """Fresh box, no catalog ever synced — any release counts as an update."""
    _stub_forgejo_release(monkeypatch, catalog, tag="26.5")
    result = catalog.check_catalog()
    assert result.current is None
    assert result.update_available is True
def test_check_catalog_raises_when_no_releases_published(catalog, monkeypatch):
    """An empty release list is a hard error, not a silent no-op."""
    _stub_forgejo_release(monkeypatch, catalog, tag="x", releases=[])
    with pytest.raises(catalog.CatalogError, match="no catalog releases"):
        catalog.check_catalog()
# --------------------------------------------------------------------------- #
# sync_catalog — happy + error paths
# --------------------------------------------------------------------------- #
def test_sync_catalog_happy_path(catalog, monkeypatch, tmp_path):
    """Full successful sync: download, sha256-verify, unpack, record state."""
    import hashlib

    tarball_path = tmp_path / "tarball.tar.gz"
    _make_catalog_tarball(tarball_path, "26.6")
    tarball_bytes = tarball_path.read_bytes()
    # Real digest of the fixture tarball so verification passes.
    sha = hashlib.sha256(tarball_bytes).hexdigest()
    _stub_forgejo_release(monkeypatch, catalog, tag="26.6")
    _stub_download(
        monkeypatch,
        catalog,
        {
            "https://forgejo.test.local/t.tar.gz": tarball_bytes,
            "https://forgejo.test.local/t.tar.gz.sha256": (
                f"{sha} furtka-apps-26.6.tar.gz\n".encode()
            ),
        },
    )
    check = catalog.sync_catalog()
    assert check.latest == "26.6"
    assert (catalog.catalog_dir() / "VERSION").read_text().strip() == "26.6"
    assert (catalog.catalog_dir() / "apps" / "fileshare" / "manifest.json").is_file()
    state = catalog.read_state()
    assert state["stage"] == "done"
    assert state["version"] == "26.6"
def test_sync_catalog_noop_when_already_current(catalog, monkeypatch, tmp_path):
    """Syncing while already on the released version must not attempt a
    download — no download stub is installed here on purpose."""
    live = catalog.catalog_dir()
    live.mkdir(parents=True)
    (live / "VERSION").write_text("26.5\n")
    _stub_forgejo_release(monkeypatch, catalog, tag="26.5")
    result = catalog.sync_catalog()
    assert result.update_available is False
    assert catalog.read_state()["stage"] == "done"
def test_sync_catalog_refuses_sha256_mismatch(catalog, monkeypatch, tmp_path):
    """A digest mismatch aborts the sync before anything goes live."""
    tarball_path = tmp_path / "tarball.tar.gz"
    _make_catalog_tarball(tarball_path, "26.6")
    _stub_forgejo_release(monkeypatch, catalog, tag="26.6")
    _stub_download(
        monkeypatch,
        catalog,
        {
            "https://forgejo.test.local/t.tar.gz": tarball_path.read_bytes(),
            # Hash for some OTHER content — will mismatch.
            "https://forgejo.test.local/t.tar.gz.sha256": (b"0" * 64 + b" wrong.tar.gz\n"),
        },
    )
    with pytest.raises(catalog.CatalogError, match="sha256 mismatch"):
        catalog.sync_catalog()
    # Live catalog never existed, must still not exist after the failed sync.
    assert not catalog.catalog_dir().exists()
def test_sync_catalog_refuses_tarball_with_invalid_manifest(catalog, monkeypatch, tmp_path):
    """A tarball carrying an invalid manifest is rejected atomically."""
    import hashlib

    bad_manifest = {"name": "broken"}  # missing required fields
    tarball_path = tmp_path / "tarball.tar.gz"
    _make_catalog_tarball(tarball_path, "26.6", apps=[("broken", bad_manifest)])
    tarball_bytes = tarball_path.read_bytes()
    # Valid sha for the (invalid) tarball so the failure happens at
    # manifest validation, not at checksum verification.
    sha = hashlib.sha256(tarball_bytes).hexdigest()
    _stub_forgejo_release(monkeypatch, catalog, tag="26.6")
    _stub_download(
        monkeypatch,
        catalog,
        {
            "https://forgejo.test.local/t.tar.gz": tarball_bytes,
            "https://forgejo.test.local/t.tar.gz.sha256": (
                f"{sha} furtka-apps-26.6.tar.gz\n".encode()
            ),
        },
    )
    with pytest.raises(catalog.CatalogError, match="invalid manifest"):
        catalog.sync_catalog()
    # Staging was cleaned; live catalog never materialised.
    assert not catalog.catalog_dir().exists()
def test_sync_catalog_preserves_existing_catalog_on_failure(catalog, monkeypatch, tmp_path):
    """A failed sync must leave the previous live catalog intact so boxes
    keep working until the next successful sync."""
    import hashlib

    # Seed a live catalog that represents a previous successful sync.
    live = catalog.catalog_dir()
    live.mkdir(parents=True)
    (live / "VERSION").write_text("26.5\n")
    (live / "apps").mkdir()
    bad_manifest = {"name": "broken"}  # invalid
    tarball_path = tmp_path / "tarball.tar.gz"
    _make_catalog_tarball(tarball_path, "26.6", apps=[("broken", bad_manifest)])
    # Correct sha so the sync fails later, at manifest validation.
    sha = hashlib.sha256(tarball_path.read_bytes()).hexdigest()
    _stub_forgejo_release(monkeypatch, catalog, tag="26.6")
    _stub_download(
        monkeypatch,
        catalog,
        {
            "https://forgejo.test.local/t.tar.gz": tarball_path.read_bytes(),
            "https://forgejo.test.local/t.tar.gz.sha256": f"{sha} x\n".encode(),
        },
    )
    with pytest.raises(catalog.CatalogError):
        catalog.sync_catalog()
    # The 26.5 live catalog survives the failed 26.6 sync.
    assert (live / "VERSION").read_text().strip() == "26.5"
def test_sync_catalog_lock_contention(catalog, monkeypatch):
    """A second sync while the lock is held must fail fast."""
    _stub_forgejo_release(monkeypatch, catalog, tag="26.6")
    # Hold the lock from outside; the real sync_catalog call must refuse.
    holder = catalog.acquire_lock()
    try:
        with pytest.raises(catalog.CatalogError, match="already in progress"):
            catalog.sync_catalog()
    finally:
        holder.close()
# --------------------------------------------------------------------------- #
# state + current-version helpers
# --------------------------------------------------------------------------- #
def test_read_current_catalog_version_absent(catalog):
    """No catalog dir on disk → no current version."""
    assert catalog.read_current_catalog_version() is None
def test_read_current_catalog_version_empty_file(catalog):
    """A VERSION file holding only a newline counts as no version."""
    live = catalog.catalog_dir()
    live.mkdir(parents=True)
    (live / "VERSION").write_text("\n")
    assert catalog.read_current_catalog_version() is None
def test_write_and_read_state_round_trip(catalog):
    """State written via write_state comes back verbatim, plus a timestamp."""
    catalog.write_state("downloading", latest="26.6")
    state = catalog.read_state()
    assert state["stage"] == "downloading"
    assert state["latest"] == "26.6"
    assert "updated_at" in state

View file

@ -1,105 +0,0 @@
import json
from furtka.cli import main
def _set_env(monkeypatch, tmp_path):
monkeypatch.setenv("FURTKA_APPS_DIR", str(tmp_path))
def test_app_list_empty(tmp_path, monkeypatch, capsys):
    """`furtka app list` on an empty apps dir prints a friendly notice."""
    _set_env(monkeypatch, tmp_path)
    assert main(["app", "list"]) == 0
    assert "no apps installed" in capsys.readouterr().out
def test_app_list_json_empty(tmp_path, monkeypatch, capsys):
    """`app list --json` on an empty apps dir emits an empty JSON array."""
    _set_env(monkeypatch, tmp_path)
    assert main(["app", "list", "--json"]) == 0
    assert json.loads(capsys.readouterr().out) == []
def test_app_list_json_with_one_app(tmp_path, monkeypatch, capsys):
    """`app list --json` surfaces the full manifest — including optional
    fields (description_long, open_url, settings) — exactly as written."""
    _set_env(monkeypatch, tmp_path)
    app = tmp_path / "fileshare"
    app.mkdir()
    (app / "manifest.json").write_text(
        json.dumps(
            {
                "name": "fileshare",
                "display_name": "Network Files",
                "version": "0.1.0",
                "description": "SMB",
                "description_long": "Long description here.",
                "volumes": ["files"],
                "ports": [445],
                "icon": "icon.svg",
                "open_url": "smb://{host}/files",
                "settings": [
                    {
                        "name": "SMB_USER",
                        "label": "User",
                        "description": "SMB user",
                        "type": "text",
                        "default": "furtka",
                        "required": True,
                    }
                ],
            }
        )
    )
    rc = main(["app", "list", "--json"])
    assert rc == 0
    data = json.loads(capsys.readouterr().out)
    assert len(data) == 1
    assert data[0]["ok"] is True
    m = data[0]["manifest"]
    assert m["name"] == "fileshare"
    assert m["description_long"] == "Long description here."
    assert m["open_url"] == "smb://{host}/files"
    assert len(m["settings"]) == 1
    assert m["settings"][0]["name"] == "SMB_USER"
    assert m["settings"][0]["required"] is True
    assert m["settings"][0]["default"] == "furtka"
def test_reconcile_dry_run_empty(tmp_path, monkeypatch, capsys):
    """Dry-run reconcile with no apps reports zero planned actions."""
    _set_env(monkeypatch, tmp_path)
    assert main(["reconcile", "--dry-run"]) == 0
    assert "0 actions" in capsys.readouterr().out
def test_app_install_bg_dispatches_to_runner(tmp_path, monkeypatch):
    """CLI `app install-bg <name>` must call install_runner.run_install(name).
    This is the entry point the HTTP API fires via systemd-run; regression
    here would leave the UI hanging at "pulling_image…" forever because
    the background never transitions state.
    """
    _set_env(monkeypatch, tmp_path)
    from furtka import install_runner

    called = []
    # Record the dispatched app name instead of running a real install.
    monkeypatch.setattr(install_runner, "run_install", lambda name: called.append(name))
    rc = main(["app", "install-bg", "fileshare"])
    assert rc == 0
    assert called == ["fileshare"]
def test_app_install_bg_returns_1_on_failure(tmp_path, monkeypatch, capsys):
    """A crashing installer maps to exit code 1 plus a stderr diagnostic."""
    _set_env(monkeypatch, tmp_path)
    from furtka import install_runner

    def exploding_install(name):
        raise RuntimeError("compose pull failed")

    monkeypatch.setattr(install_runner, "run_install", exploding_install)
    assert main(["app", "install-bg", "fileshare"]) == 1
    captured_err = capsys.readouterr().err
    assert "install-bg failed" in captured_err
    assert "compose pull failed" in captured_err

View file

@ -1,8 +1,6 @@
from drives import ( from drives import (
get_drive_type_label,
get_drive_type_score, get_drive_type_score,
get_size_score, get_size_score,
parse_lsblk_output,
parse_size_gb, parse_size_gb,
score_device, score_device,
) )
@ -57,61 +55,3 @@ def test_score_device_sums_type_and_size(monkeypatch):
monkeypatch.setattr(drives, "get_drive_health", lambda _: 10) monkeypatch.setattr(drives, "get_drive_health", lambda _: 10)
assert score_device("/dev/nvme0n1", 1024) == 15 + 10 + 10 assert score_device("/dev/nvme0n1", 1024) == 15 + 10 + 10
assert score_device("/dev/sda", 64) == 5 + 10 + 5 assert score_device("/dev/sda", 64) == 5 + 10 + 5
def test_parse_lsblk_drops_loop_and_rom(monkeypatch):
    """loop/rom pseudo-devices are filtered out of the parsed list."""
    import drives

    monkeypatch.setattr(drives, "_smart_status", lambda _: "passed")
    lsblk_text = (
        "loop0 2.5G loop\n"
        "sr0 1024M rom\n"
        "sda 500G disk\n"
        "nvme0n1 1T disk\n"
    )
    parsed = parse_lsblk_output(lsblk_text)
    assert [entry["name"] for entry in parsed] == ["/dev/nvme0n1", "/dev/sda"]
def test_parse_lsblk_attaches_human_labels(monkeypatch):
    """Parsed devices carry human-readable type and health labels."""
    import drives

    monkeypatch.setattr(drives, "_smart_status", lambda _: "passed")
    [device] = parse_lsblk_output("nvme0n1 1T disk\n")
    assert device["type_label"] == "NVMe"
    assert device["health_label"] == "Healthy"
def test_parse_lsblk_surfaces_smart_warning(monkeypatch):
    """A failing SMART status surfaces as a warning health label."""
    import drives

    monkeypatch.setattr(drives, "_smart_status", lambda _: "failed")
    [device] = parse_lsblk_output("sda 500G disk\n")
    assert device["health_label"] == "SMART warning"
def test_drive_type_label_nvme_ssd_hdd():
    """Device-name prefixes map to the three human type labels."""
    expected = {"/dev/nvme0n1": "NVMe", "/dev/ssd0": "SSD", "/dev/sda": "HDD"}
    for device, label in expected.items():
        assert get_drive_type_label(device) == label
def test_parse_lsblk_handles_empty_output():
    """Empty lsblk output yields an empty device list, not an error."""
    assert parse_lsblk_output("") == []
def test_parse_lsblk_drops_boot_usb(monkeypatch):
    """The disk named by ``boot_disk`` is excluded from the results."""
    import drives

    monkeypatch.setattr(drives, "_smart_status", lambda _: "passed")
    lsblk_text = "sda 500G disk\nsdb 16G disk\nnvme0n1 1T disk\n"
    parsed = parse_lsblk_output(lsblk_text, boot_disk="sdb")
    found = [entry["name"] for entry in parsed]
    assert "/dev/sdb" not in found
    assert found == ["/dev/nvme0n1", "/dev/sda"]
def test_parse_lsblk_no_boot_disk_keeps_all(monkeypatch):
    """With boot_disk=None no device is excluded."""
    import drives

    monkeypatch.setattr(drives, "_smart_status", lambda _: "passed")
    parsed = parse_lsblk_output("sda 500G disk\nsdb 16G disk\n", boot_disk=None)
    assert {entry["name"] for entry in parsed} == {"/dev/sda", "/dev/sdb"}

View file

@ -1,216 +0,0 @@
"""Tests for furtka.https — fingerprint extraction + HTTPS toggle.
Since 26.15-alpha the toggle writes/removes TWO snippets atomically:
- The top-level HTTPS listener snippet (enables :443 + tls internal)
- The :80-scoped redirect snippet (forces HTTP → HTTPS)
The fingerprint case uses a throwaway self-signed EC cert with a known
reference fingerprint (computed once via `openssl x509 -fingerprint
-sha256 -noout`) so we verify the PEM DER SHA256 path without a
runtime subprocess dependency. The toggle cases stub the caddy reload
so we assert both snippet files are written / removed together and that
reload failures roll BOTH state back.
"""
import subprocess
import pytest
from furtka import https
# Self-signed test-only cert. Don't trust it anywhere; it's here because
# we need a real PEM whose fingerprint we can pre-compute.
_TEST_CERT_PEM = """-----BEGIN CERTIFICATE-----
MIIBjjCCATOgAwIBAgIUGIKx2BGMvNQwAcZvjwJiaJO1GvEwCgYIKoZIzj0EAwIw
HDEaMBgGA1UEAwwRRnVydGthIFRlc3QgTG9jYWwwHhcNMjYwNDE3MTAxNTMxWhcN
MzYwNDE0MTAxNTMxWjAcMRowGAYDVQQDDBFGdXJ0a2EgVGVzdCBMb2NhbDBZMBMG
ByqGSM49AgEGCCqGSM49AwEHA0IABIfWX2oVXrw+iv4lCcIIceoX24bvRdlEECB5
QoMYphmlOoI492tRCGHxA8eaIwIYqFn1DzBKBRSL0H3xcu+4Pg6jUzBRMB0GA1Ud
DgQWBBSMizCL5Kh+SLE5n12oKV05L9bJXjAfBgNVHSMEGDAWgBSMizCL5Kh+SLE5
n12oKV05L9bJXjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49BAMCA0kAMEYCIQDp
6etGEuj7AGD5zzyzDSpmRiMEgBp1k6fVoLYW7N2K3AIhAK8khUp3gKPo4UqtWNK9
Cs/B0mzRy2MUPGdZ5QU6LoDz
-----END CERTIFICATE-----
"""
_TEST_CERT_FP_SHA256 = (
"40:A7:98:2E:8D:1F:4C:0D:9B:E6:87:ED:91:FA:6F:B1:"
"3D:8A:10:06:79:7C:08:A9:8F:AD:71:0C:B8:29:87:28"
)
def _paths(tmp_path):
"""Return the four paths the toggle touches, in a dict for kwargs
spreading. Keeps each test's fixture boilerplate small."""
return {
"snippet_dir": tmp_path / "furtka.d",
"snippet": tmp_path / "furtka.d" / "redirect.caddyfile",
"https_snippet_dir": tmp_path / "furtka-https.d",
"https_snippet": tmp_path / "furtka-https.d" / "https.caddyfile",
"hostname_file": tmp_path / "etc_hostname",
}
def _prepare_hostname(tmp_path, value="testbox"):
(tmp_path / "etc_hostname").write_text(f"{value}\n")
def test_ca_fingerprint_matches_openssl(tmp_path):
    """_ca_fingerprint must reproduce openssl's SHA256 cert fingerprint."""
    cert_path = tmp_path / "root.crt"
    cert_path.write_text(_TEST_CERT_PEM)
    fingerprint = https._ca_fingerprint(cert_path)
    assert fingerprint is not None
    assert https._format_fingerprint(fingerprint) == _TEST_CERT_FP_SHA256
def test_ca_fingerprint_missing_file(tmp_path):
    """A missing CA file yields None rather than an exception."""
    assert https._ca_fingerprint(tmp_path / "nope.crt") is None
def test_ca_fingerprint_no_pem_block(tmp_path):
    """A file without a PEM certificate block yields None."""
    bogus = tmp_path / "root.crt"
    bogus.write_text("not a certificate")
    assert https._ca_fingerprint(bogus) is None
def test_status_no_ca_no_snippet(tmp_path):
    """With no CA and no listener snippet every status field reads off."""
    result = https.status(
        ca_path=tmp_path / "root.crt", https_snippet=tmp_path / "https.caddyfile"
    )
    assert result == {
        "ca_available": False,
        "fingerprint_sha256": None,
        "force_https": False,
        "ca_download_url": "/rootCA.crt",
    }
def test_status_with_ca_and_https_snippet(tmp_path):
    """CA present + listener snippet present → fully-on status."""
    ca_path = tmp_path / "root.crt"
    ca_path.write_text(_TEST_CERT_PEM)
    listener = tmp_path / "https.caddyfile"
    listener.write_text("furtka.local, furtka {\n\ttls internal\n\timport furtka_routes\n}\n")
    result = https.status(ca_path=ca_path, https_snippet=listener)
    assert result["ca_available"] is True
    assert result["fingerprint_sha256"] == _TEST_CERT_FP_SHA256
    assert result["force_https"] is True
def test_status_force_reflects_https_snippet_not_redirect(tmp_path):
    """Authoritative signal for "HTTPS is on" is the listener snippet —
    a lone redirect without a :443 listener wouldn't actually serve
    HTTPS, so the status must NOT report it as on. Locks 26.15 semantic."""
    ca = tmp_path / "root.crt"
    ca.write_text(_TEST_CERT_PEM)
    # CA exists but the listener snippet doesn't — status must read off.
    s = https.status(ca_path=ca, https_snippet=tmp_path / "does-not-exist.caddyfile")
    assert s["force_https"] is False
def test_set_force_enable_writes_both_snippets_and_reloads(tmp_path):
    """Enabling writes the redirect and the listener snippet, then reloads
    Caddy exactly once."""
    _prepare_hostname(tmp_path)
    p = _paths(tmp_path)
    calls = []

    def fake_reload():
        calls.append("reload")

    result = https.set_force_https(True, reload_caddy=fake_reload, **p)
    assert result is True
    assert p["snippet"].read_text() == https.REDIRECT_CONTENT
    # The listener block carries the hostname from the fixture hostname file.
    written = p["https_snippet"].read_text()
    assert "testbox.local, testbox" in written
    assert "tls internal" in written
    assert "import furtka_routes" in written
    assert calls == ["reload"]
def test_set_force_uses_fallback_hostname_when_file_missing(tmp_path):
    """No /etc/hostname → fall back to 'furtka' so Caddy gets a parseable
    block instead of an empty hostname that would fail config load."""
    paths = _paths(tmp_path)
    enabled = https.set_force_https(True, reload_caddy=lambda: None, **paths)
    assert enabled is True
    assert "furtka.local, furtka" in paths["https_snippet"].read_text()
def test_set_force_disable_removes_both_snippets(tmp_path):
    """Disabling removes the redirect and the listener snippet together."""
    _prepare_hostname(tmp_path)
    paths = _paths(tmp_path)
    paths["snippet_dir"].mkdir()
    paths["https_snippet_dir"].mkdir()
    paths["snippet"].write_text(https.REDIRECT_CONTENT)
    paths["https_snippet"].write_text("furtka.local { tls internal }\n")
    disabled = https.set_force_https(False, reload_caddy=lambda: None, **paths)
    assert disabled is False
    assert not paths["snippet"].exists()
    assert not paths["https_snippet"].exists()
def test_set_force_disable_is_idempotent_when_already_off(tmp_path):
    """Disabling when already off is harmless — nothing to remove."""
    paths = _paths(tmp_path)
    assert https.set_force_https(False, reload_caddy=lambda: None, **paths) is False
    assert not paths["snippet"].exists()
    assert not paths["https_snippet"].exists()
def test_reload_failure_rolls_back_enable(tmp_path):
    """If the Caddy reload fails, neither snippet may be left behind."""
    _prepare_hostname(tmp_path)
    paths = _paths(tmp_path)

    def failing_reload():
        raise subprocess.CalledProcessError(1, ["systemctl"], stderr="bad config")

    with pytest.raises(https.HttpsError, match="caddy reload failed: bad config"):
        https.set_force_https(True, reload_caddy=failing_reload, **paths)
    # Rollback: since neither snippet existed before, neither exists after.
    assert not paths["snippet"].exists()
    assert not paths["https_snippet"].exists()
def test_reload_failure_rolls_back_disable(tmp_path):
    """A failed reload while disabling restores both snippets verbatim."""
    _prepare_hostname(tmp_path)
    p = _paths(tmp_path)
    p["snippet_dir"].mkdir()
    p["https_snippet_dir"].mkdir()
    original_redirect = "redir https://{host}{uri} permanent\n# marker\n"
    original_https = "# old https block\nfurtka.local { tls internal }\n"
    p["snippet"].write_text(original_redirect)
    p["https_snippet"].write_text(original_https)

    def failing_reload():
        raise subprocess.CalledProcessError(1, ["systemctl"], stderr="bad config")

    with pytest.raises(https.HttpsError):
        https.set_force_https(False, reload_caddy=failing_reload, **p)
    # Rollback: both snippets are restored to their exact prior contents.
    assert p["snippet"].read_text() == original_redirect
    assert p["https_snippet"].read_text() == original_https
def test_systemctl_missing_raises_and_rolls_back(tmp_path):
    """A missing systemctl binary surfaces as HttpsError and rolls back."""
    _prepare_hostname(tmp_path)
    paths = _paths(tmp_path)

    def reload_without_systemctl():
        raise FileNotFoundError(2, "No such file", "systemctl")

    with pytest.raises(https.HttpsError, match="systemctl not available"):
        https.set_force_https(True, reload_caddy=reload_without_systemctl, **paths)
    assert not paths["snippet"].exists()
    assert not paths["https_snippet"].exists()
def test_redirect_snippet_content_is_caddy_redir_directive():
    """Pin the exact redirect directive shipped to Caddy."""
    # Lock the exact directive. A regression here silently stops the
    # redirect from taking effect even though the file-swap looks fine.
    assert https.REDIRECT_CONTENT.strip() == "redir https://{host}{uri} permanent"
def test_https_snippet_content_has_tls_internal_and_routes(tmp_path):
    """Pin the shape of the generated HTTPS listener block."""
    # Lock the shape of the opt-in HTTPS listener block. Caddy parses
    # this verbatim — changing the shape without updating the test
    # risks shipping a silently-broken Caddyfile import.
    s = https._https_snippet_content("mybox")
    assert "mybox.local, mybox {" in s
    assert "\ttls internal" in s
    assert "\timport furtka_routes" in s
    assert s.endswith("}\n")

View file

@ -1,177 +0,0 @@
"""Tests for the background app-install runner.
Same shape as test_catalog.py / test_updater.py: fixture reloads the
module with env-overridden paths, dockerops calls are stubbed so nothing
touches a real daemon. Asserts that state transitions happen in the
right order and that exceptions flip the state to "error" with the
message before re-raising.
"""
from __future__ import annotations
import json
from pathlib import Path
import pytest
@pytest.fixture
def runner(tmp_path, monkeypatch):
    """Reload ``furtka.install_runner`` with all its paths inside tmp_path.

    ``furtka.paths`` is reloaded first so it re-reads the FURTKA_* env
    overrides before the runner module is reloaded on top of it.
    """
    apps = tmp_path / "apps"
    apps.mkdir()
    monkeypatch.setenv("FURTKA_APPS_DIR", str(apps))
    monkeypatch.setenv("FURTKA_INSTALL_STATE", str(tmp_path / "install-state.json"))
    monkeypatch.setenv("FURTKA_INSTALL_LOCK", str(tmp_path / "install.lock"))
    import importlib

    from furtka import install_runner as r
    from furtka import paths as p

    importlib.reload(p)
    importlib.reload(r)
    return r
def _write_installed_app(apps_dir: Path, name: str = "fileshare"):
app = apps_dir / name
app.mkdir()
manifest = {
"name": name,
"display_name": "Fileshare",
"version": "0.1.0",
"description": "Test fixture",
"volumes": ["files"],
"ports": [445],
"icon": "icon.svg",
}
(app / "manifest.json").write_text(json.dumps(manifest))
(app / "docker-compose.yaml").write_text("services: {}\n")
return app
def test_write_and_read_state_round_trip(runner):
    """State written by the runner reads back verbatim, with a timestamp."""
    runner.write_state("pulling_image", app="jellyfin")
    state = runner.read_state()
    assert state["stage"] == "pulling_image"
    assert state["app"] == "jellyfin"
    assert "updated_at" in state
def test_read_state_returns_empty_when_missing(runner):
    """No state file yet → read_state reports an empty dict."""
    assert runner.read_state() == {}
def test_read_state_returns_empty_on_junk(runner):
    """Corrupt JSON in the state file degrades to an empty dict."""
    state_file = runner.state_path()
    state_file.parent.mkdir(parents=True, exist_ok=True)
    state_file.write_text("{not json")
    assert runner.read_state() == {}
def test_acquire_lock_prevents_concurrent_runs(runner):
    """A held install lock makes a second acquire fail fast."""
    holder = runner.acquire_lock()
    try:
        with pytest.raises(runner.InstallRunnerError, match="in progress"):
            runner.acquire_lock()
    finally:
        holder.close()
def test_run_install_happy_path(runner, monkeypatch):
    """Happy path: pull, then volume creation, then up — ending in 'done'."""
    import furtka.dockerops as dockerops
    from furtka.paths import apps_dir

    _write_installed_app(apps_dir(), "fileshare")
    calls = []
    # Record every dockerops call instead of touching a real daemon.
    monkeypatch.setattr(dockerops, "compose_pull", lambda *a, **k: calls.append(("pull", a)))
    monkeypatch.setattr(dockerops, "ensure_volume", lambda name: calls.append(("vol", name)))
    monkeypatch.setattr(dockerops, "compose_up", lambda *a, **k: calls.append(("up", a)))
    runner.run_install("fileshare")
    # Ordering: pull first, then volumes, then up.
    assert [c[0] for c in calls] == ["pull", "vol", "up"]
    # Exactly the namespaced volume name got created.
    assert calls[1] == ("vol", "furtka_fileshare_files")
    # Final state is "done" with the manifest version.
    s = runner.read_state()
    assert s["stage"] == "done"
    assert s["app"] == "fileshare"
    assert s["version"] == "0.1.0"
def test_run_install_writes_error_on_pull_failure(runner, monkeypatch):
    """A failing image pull re-raises and records stage 'error' + message."""
    import furtka.dockerops as dockerops
    from furtka.paths import apps_dir

    _write_installed_app(apps_dir(), "fileshare")

    def boom(*a, **k):
        raise dockerops.DockerError("pull failed: registry unreachable")

    monkeypatch.setattr(dockerops, "compose_pull", boom)
    monkeypatch.setattr(dockerops, "ensure_volume", lambda name: None)
    monkeypatch.setattr(dockerops, "compose_up", lambda *a, **k: None)
    with pytest.raises(dockerops.DockerError):
        runner.run_install("fileshare")
    s = runner.read_state()
    assert s["stage"] == "error"
    assert s["app"] == "fileshare"
    assert "registry unreachable" in s["error"]
def test_run_install_writes_error_on_up_failure(runner, monkeypatch):
    """A failing compose up re-raises and records stage 'error' + message."""
    import furtka.dockerops as dockerops
    from furtka.paths import apps_dir

    _write_installed_app(apps_dir(), "fileshare")
    monkeypatch.setattr(dockerops, "compose_pull", lambda *a, **k: None)
    monkeypatch.setattr(dockerops, "ensure_volume", lambda name: None)

    def boom(*a, **k):
        raise dockerops.DockerError("compose up: container refused to start")

    monkeypatch.setattr(dockerops, "compose_up", boom)
    with pytest.raises(dockerops.DockerError):
        runner.run_install("fileshare")
    s = runner.read_state()
    assert s["stage"] == "error"
    assert "refused to start" in s["error"]
def test_run_install_releases_lock_after_done(runner, monkeypatch):
    """After a successful install the lock must be reacquirable."""
    import furtka.dockerops as dockerops
    from furtka.paths import apps_dir

    _write_installed_app(apps_dir(), "fileshare")
    for operation in ("compose_pull", "ensure_volume", "compose_up"):
        monkeypatch.setattr(dockerops, operation, lambda *a, **k: None)
    runner.run_install("fileshare")
    # Lock released — a fresh acquire must succeed.
    runner.acquire_lock().close()
def test_run_install_releases_lock_after_error(runner, monkeypatch):
    """Even a failing install must release the lock on the way out."""
    import furtka.dockerops as dockerops
    from furtka.paths import apps_dir

    _write_installed_app(apps_dir(), "fileshare")
    # Generator-throw trick: a lambda that raises DockerError when called.
    monkeypatch.setattr(
        dockerops, "compose_pull", lambda *a, **k: (_ for _ in ()).throw(dockerops.DockerError("x"))
    )
    with pytest.raises(dockerops.DockerError):
        runner.run_install("fileshare")
    # Lock released despite the error — a fresh acquire must succeed.
    fh = runner.acquire_lock()
    fh.close()

View file

@ -1,357 +0,0 @@
import json
import pytest
from furtka import installer
from furtka.paths import apps_dir, bundled_apps_dir
VALID_MANIFEST = {
"name": "fileshare",
"display_name": "Network Files",
"version": "0.1.0",
"description": "SMB share",
"volumes": ["files"],
"ports": [445],
"icon": "icon.svg",
}
@pytest.fixture
def fake_dirs(tmp_path, monkeypatch):
    """Create throwaway installed/bundled app dirs and point the env at them."""
    apps_root = tmp_path / "apps"
    bundled_root = tmp_path / "bundled"
    for directory, env_var in (
        (apps_root, "FURTKA_APPS_DIR"),
        (bundled_root, "FURTKA_BUNDLED_APPS_DIR"),
    ):
        directory.mkdir()
        monkeypatch.setenv(env_var, str(directory))
    return apps_root, bundled_root
def _write_app_source(root, name, manifest, env_example=None, env=None):
app = root / name
app.mkdir()
(app / "manifest.json").write_text(json.dumps(manifest))
(app / "docker-compose.yaml").write_text("services: {}\n")
if env_example is not None:
(app / ".env.example").write_text(env_example)
if env is not None:
(app / ".env").write_text(env)
return app
def test_resolve_source_explicit_path(tmp_path, fake_dirs):
    """An explicit path string resolves to that exact directory."""
    source = _write_app_source(tmp_path, "fileshare", VALID_MANIFEST)
    assert installer.resolve_source(str(source)) == source
def test_resolve_source_bundled_name(fake_dirs):
    """A bare app name resolves against the bundled apps dir."""
    _, bundled = fake_dirs
    source = _write_app_source(bundled, "fileshare", VALID_MANIFEST)
    assert installer.resolve_source("fileshare") == source
def test_resolve_source_unknown_name(fake_dirs):
    """An unknown bundled name raises a 'not found' InstallError."""
    with pytest.raises(installer.InstallError, match="not found"):
        installer.resolve_source("nope")
def test_resolve_source_path_with_slash_must_exist(fake_dirs):
    """Path-like arguments (containing a slash) must exist on disk."""
    with pytest.raises(installer.InstallError, match="not a directory"):
        installer.resolve_source("./does-not-exist")
def test_install_from_copies_files(tmp_path, fake_dirs):
    """install_from copies manifest/compose/.env.example and bootstraps .env."""
    src = _write_app_source(tmp_path, "fileshare", VALID_MANIFEST, env_example="A=1")
    target = installer.install_from(src)
    assert target == apps_dir() / "fileshare"
    assert (target / "manifest.json").exists()
    assert (target / "docker-compose.yaml").exists()
    assert (target / ".env.example").exists()
    # .env bootstrapped from .env.example since none was shipped
    assert (target / ".env").read_text() == "A=1"
def test_install_from_preserves_existing_env(tmp_path, fake_dirs):
    """Re-installing refreshes .env.example but never clobbers a user .env."""
    src = _write_app_source(tmp_path, "fileshare", VALID_MANIFEST, env_example="A=new")
    target = apps_dir() / "fileshare"
    target.mkdir()
    (target / ".env").write_text("A=user-edited")
    installer.install_from(src)
    # User .env not clobbered.
    assert (target / ".env").read_text() == "A=user-edited"
    # But .env.example was updated.
    assert (target / ".env.example").read_text() == "A=new"
def test_install_from_rejects_missing_manifest(tmp_path, fake_dirs):
src = tmp_path / "broken"
src.mkdir()
with pytest.raises(installer.InstallError, match="manifest.json"):
installer.install_from(src)
def test_install_from_arbitrary_source_folder_name(tmp_path, fake_dirs):
# Source folder named "downloaded-fileshare-fork-v2" but manifest says
# "fileshare" — install lands at /var/lib/furtka/apps/fileshare/ regardless.
src = _write_app_source(
tmp_path,
"downloaded-fileshare-fork-v2",
VALID_MANIFEST,
env_example="A=real-value",
)
target = installer.install_from(src)
assert target.name == "fileshare"
assert (target / "manifest.json").exists()
def test_install_from_rejects_invalid_manifest(tmp_path, fake_dirs):
bad = dict(VALID_MANIFEST)
del bad["volumes"]
src = _write_app_source(tmp_path, "fileshare", bad)
with pytest.raises(installer.InstallError, match="volumes"):
installer.install_from(src)
def test_remove_deletes_folder(fake_dirs):
apps, _ = fake_dirs
(apps / "fileshare").mkdir()
(apps / "fileshare" / "manifest.json").write_text("{}")
installer.remove("fileshare")
assert not (apps / "fileshare").exists()
def test_remove_unknown_raises(fake_dirs):
with pytest.raises(installer.InstallError, match="not installed"):
installer.remove("ghost")
def test_bundled_apps_dir_uses_env_override(fake_dirs):
_, bundled = fake_dirs
assert bundled_apps_dir() == bundled
def test_install_refuses_placeholder_password(tmp_path, fake_dirs):
src = _write_app_source(
tmp_path, "fileshare", VALID_MANIFEST, env_example="SMB_PASSWORD=changeme"
)
with pytest.raises(installer.InstallError, match="placeholder values for SMB_PASSWORD"):
installer.install_from(src)
# Files should still have landed so the user can vim the .env in place.
target = apps_dir() / "fileshare"
assert (target / ".env").exists()
assert (target / "manifest.json").exists()
def test_install_succeeds_after_user_edits_env(tmp_path, fake_dirs):
# First run: refuses placeholder.
src = _write_app_source(
tmp_path, "fileshare", VALID_MANIFEST, env_example="SMB_PASSWORD=changeme"
)
with pytest.raises(installer.InstallError):
installer.install_from(src)
# User edits the live .env to a real secret.
target = apps_dir() / "fileshare"
(target / ".env").write_text("SMB_PASSWORD=hunter2\n")
# Re-run: now succeeds, user .env preserved.
installer.install_from(src)
assert (target / ".env").read_text().strip() == "SMB_PASSWORD=hunter2"
def test_install_locks_env_permissions(tmp_path, fake_dirs):
src = _write_app_source(
tmp_path, "fileshare", VALID_MANIFEST, env_example="SMB_PASSWORD=hunter2"
)
installer.install_from(src)
target = apps_dir() / "fileshare"
mode = (target / ".env").stat().st_mode & 0o777
assert mode == 0o600, f"expected 0o600 on .env, got {oct(mode)}"
def test_placeholder_check_ignores_comments_and_blanks(tmp_path, fake_dirs):
src = _write_app_source(
tmp_path,
"fileshare",
VALID_MANIFEST,
env_example="# default values\n\nSMB_PASSWORD=real-secret\n",
)
# Should NOT raise — only commented "changeme" mentions, no actual placeholder.
installer.install_from(src)
def test_placeholder_check_handles_quoted_values(tmp_path, fake_dirs):
src = _write_app_source(
tmp_path,
"fileshare",
VALID_MANIFEST,
env_example='SMB_PASSWORD="changeme"\n',
)
with pytest.raises(installer.InstallError, match="placeholder"):
installer.install_from(src)
# --- Settings-driven install -------------------------------------------------
# Manifest with two declared settings: a text field with a default and a
# required password with no default.
SETTINGS_MANIFEST = dict(
    VALID_MANIFEST,
    settings=[
        {
            "name": "SMB_USER",
            "label": "User",
            "type": "text",
            "default": "furtka",
            "required": True,
        },
        {"name": "SMB_PASSWORD", "label": "Pass", "type": "password", "required": True},
    ],
)
def test_install_with_settings_writes_env(tmp_path, fake_dirs):
    # Submitted settings are written into the app's .env.
    src = _write_app_source(tmp_path, "fileshare", SETTINGS_MANIFEST)
    installer.install_from(src, settings={"SMB_USER": "daniel", "SMB_PASSWORD": "hunter2"})
    target = apps_dir() / "fileshare"
    env = (target / ".env").read_text()
    assert "SMB_USER=daniel" in env
    assert "SMB_PASSWORD=hunter2" in env
def test_install_with_settings_rejects_empty_required(tmp_path, fake_dirs):
    src = _write_app_source(tmp_path, "fileshare", SETTINGS_MANIFEST)
    # SMB_PASSWORD has no default and is required — submitting empty is rejected.
    with pytest.raises(installer.InstallError, match="'SMB_PASSWORD' is required"):
        installer.install_from(src, settings={"SMB_USER": "daniel", "SMB_PASSWORD": ""})
def test_install_with_settings_rejects_unknown_key(tmp_path, fake_dirs):
    # Keys not declared in the manifest's settings are rejected by name.
    src = _write_app_source(tmp_path, "fileshare", SETTINGS_MANIFEST)
    with pytest.raises(installer.InstallError, match="unknown setting 'FOO'"):
        installer.install_from(src, settings={"SMB_USER": "a", "SMB_PASSWORD": "b", "FOO": "x"})
def test_install_settings_merge_preserves_unchanged(tmp_path, fake_dirs):
    # First install with full settings.
    src = _write_app_source(tmp_path, "fileshare", SETTINGS_MANIFEST)
    installer.install_from(src, settings={"SMB_USER": "daniel", "SMB_PASSWORD": "hunter2"})
    # Second call with only password — user should keep existing user name.
    installer.install_from(src, settings={"SMB_PASSWORD": "newpass"})
    target = apps_dir() / "fileshare"
    env = (target / ".env").read_text()
    assert "SMB_USER=daniel" in env
    assert "SMB_PASSWORD=newpass" in env
def test_install_settings_applies_defaults_on_first_install(tmp_path, fake_dirs):
    src = _write_app_source(tmp_path, "fileshare", SETTINGS_MANIFEST)
    # Only password submitted; SMB_USER falls through to its manifest default
    # ("furtka") and the required check passes because the merged view has it.
    installer.install_from(src, settings={"SMB_PASSWORD": "hunter2"})
    target = apps_dir() / "fileshare"
    env = (target / ".env").read_text()
    assert "SMB_USER=furtka" in env
    assert "SMB_PASSWORD=hunter2" in env
def test_install_with_settings_writes_0600(tmp_path, fake_dirs):
    # Settings-driven .env gets the same owner-only permissions as the copy path.
    src = _write_app_source(tmp_path, "fileshare", SETTINGS_MANIFEST)
    installer.install_from(src, settings={"SMB_USER": "a", "SMB_PASSWORD": "b"})
    mode = (apps_dir() / "fileshare" / ".env").stat().st_mode & 0o777
    assert mode == 0o600
def test_read_env_values_roundtrip(tmp_path, fake_dirs):
    # write_env / read_env_values round-trip plain, spaced, quoted, empty values.
    from furtka.installer import read_env_values, write_env
    p = tmp_path / ".env"
    write_env(p, {"A": "plain", "B": "has space", "C": 'has "quote"', "D": ""})
    values = read_env_values(p)
    assert values == {"A": "plain", "B": "has space", "C": 'has "quote"', "D": ""}
# --- path-type settings ------------------------------------------------------
# Manifest with a single required path-type setting.
PATH_MANIFEST = dict(
    VALID_MANIFEST,
    name="jellyfin",
    settings=[
        {
            "name": "MEDIA_PATH",
            "label": "Medienordner",
            "type": "path",
            "required": True,
        }
    ],
)
# Same shape, but the path setting is optional (may be submitted empty).
OPTIONAL_PATH_MANIFEST = dict(
    VALID_MANIFEST,
    name="jellyfin",
    settings=[{"name": "OPTIONAL_PATH", "label": "Optional", "type": "path", "required": False}],
)
def test_install_with_valid_path_succeeds(tmp_path, fake_dirs):
    # An existing absolute directory is accepted and lands in .env verbatim.
    media = tmp_path / "media"
    media.mkdir()
    src = _write_app_source(tmp_path, "jellyfin", PATH_MANIFEST)
    installer.install_from(src, settings={"MEDIA_PATH": str(media)})
    target = apps_dir() / "jellyfin"
    assert f"MEDIA_PATH={media}" in (target / ".env").read_text()
def test_install_rejects_nonexistent_path(tmp_path, fake_dirs):
    src = _write_app_source(tmp_path, "jellyfin", PATH_MANIFEST)
    with pytest.raises(installer.InstallError, match="does not exist"):
        installer.install_from(src, settings={"MEDIA_PATH": str(tmp_path / "ghost")})
def test_install_rejects_path_that_is_a_file(tmp_path, fake_dirs):
    f = tmp_path / "not-a-dir"
    f.write_text("hi")
    src = _write_app_source(tmp_path, "jellyfin", PATH_MANIFEST)
    with pytest.raises(installer.InstallError, match="is not a directory"):
        installer.install_from(src, settings={"MEDIA_PATH": str(f)})
def test_install_rejects_relative_path(tmp_path, fake_dirs):
    # Path settings must be absolute.
    src = _write_app_source(tmp_path, "jellyfin", PATH_MANIFEST)
    with pytest.raises(installer.InstallError, match="absolute path"):
        installer.install_from(src, settings={"MEDIA_PATH": "media"})
def test_install_rejects_system_path(tmp_path, fake_dirs):
    # Deny-listed system locations are refused.
    src = _write_app_source(tmp_path, "jellyfin", PATH_MANIFEST)
    with pytest.raises(installer.InstallError, match="system path"):
        installer.install_from(src, settings={"MEDIA_PATH": "/etc"})
def test_install_rejects_root_filesystem(tmp_path, fake_dirs):
    src = _write_app_source(tmp_path, "jellyfin", PATH_MANIFEST)
    with pytest.raises(installer.InstallError, match="system path"):
        installer.install_from(src, settings={"MEDIA_PATH": "/"})
def test_install_rejects_deny_list_via_traversal(tmp_path, fake_dirs):
    # /mnt/../etc resolves to /etc — must be caught after Path.resolve().
    src = _write_app_source(tmp_path, "jellyfin", PATH_MANIFEST)
    with pytest.raises(installer.InstallError, match="system path"):
        installer.install_from(src, settings={"MEDIA_PATH": "/mnt/../etc"})
def test_install_accepts_empty_optional_path(tmp_path, fake_dirs):
    # Optional path submitted empty: install still completes and writes .env.
    src = _write_app_source(tmp_path, "jellyfin", OPTIONAL_PATH_MANIFEST)
    installer.install_from(src, settings={"OPTIONAL_PATH": ""})
    target = apps_dir() / "jellyfin"
    assert (target / ".env").exists()
def test_update_env_rejects_invalid_path(tmp_path, fake_dirs):
    # First install with a valid path.
    media = tmp_path / "media"
    media.mkdir()
    src = _write_app_source(tmp_path, "jellyfin", PATH_MANIFEST)
    installer.install_from(src, settings={"MEDIA_PATH": str(media)})
    # Then try to update to a bad path.
    with pytest.raises(installer.InstallError, match="does not exist"):
        installer.update_env("jellyfin", {"MEDIA_PATH": str(tmp_path / "ghost")})

View file

@ -1,193 +0,0 @@
import json
import pytest
from furtka.manifest import Manifest, ManifestError, load_manifest
# Baseline manifest payload; individual tests mutate shallow copies of it.
VALID_MANIFEST = {
    "name": "fileshare",
    "display_name": "Network Files",
    "version": "0.1.0",
    "description": "SMB share",
    "volumes": ["files"],
    "ports": [445],
    "icon": "icon.svg",
}
def _write_app(tmp_path, name, payload):
app_dir = tmp_path / name
app_dir.mkdir()
(app_dir / "manifest.json").write_text(json.dumps(payload))
return app_dir / "manifest.json"
def test_load_valid_manifest(tmp_path):
    # volumes/ports come back as tuples after loading.
    path = _write_app(tmp_path, "fileshare", VALID_MANIFEST)
    m = load_manifest(path)
    assert isinstance(m, Manifest)
    assert m.name == "fileshare"
    assert m.volumes == ("files",)
    assert m.ports == (445,)
def test_volume_namespacing(tmp_path):
    # Docker volume names are namespaced as furtka_<app>_<volume>.
    path = _write_app(tmp_path, "fileshare", VALID_MANIFEST)
    m = load_manifest(path)
    assert m.volume_name("files") == "furtka_fileshare_files"
def test_unknown_volume_raises(tmp_path):
    # Asking for an undeclared volume is an error, not a silent name.
    path = _write_app(tmp_path, "fileshare", VALID_MANIFEST)
    m = load_manifest(path)
    with pytest.raises(ManifestError):
        m.volume_name("does-not-exist")
def test_missing_required_field(tmp_path):
    # The error message names the missing field.
    bad = dict(VALID_MANIFEST)
    del bad["display_name"]
    path = _write_app(tmp_path, "fileshare", bad)
    with pytest.raises(ManifestError, match="display_name"):
        load_manifest(path)
def test_name_must_match_when_expected_name_given(tmp_path):
    # Scanner passes expected_name=<folder name> so /var/lib/furtka/apps/X/
    # can't lie about its own identity.
    path = _write_app(tmp_path, "wrong-folder", VALID_MANIFEST)
    with pytest.raises(ManifestError, match="must equal 'wrong-folder'"):
        load_manifest(path, expected_name="wrong-folder")
def test_name_check_skipped_without_expected_name(tmp_path):
    # Installer loads from arbitrary source paths (e.g. /tmp/my-tweaked-app/)
    # — the source folder name shouldn't matter, only the manifest's own name.
    path = _write_app(tmp_path, "any-folder-name", VALID_MANIFEST)
    m = load_manifest(path)
    assert m.name == "fileshare"
def test_invalid_json(tmp_path):
    # Malformed JSON surfaces as ManifestError, not a raw json exception.
    app = tmp_path / "fileshare"
    app.mkdir()
    (app / "manifest.json").write_text("{not json")
    with pytest.raises(ManifestError, match="invalid JSON"):
        load_manifest(app / "manifest.json")
def test_volumes_wrong_type(tmp_path):
    # A bare string is rejected even though it is iterable.
    bad = dict(VALID_MANIFEST, volumes="files")
    path = _write_app(tmp_path, "fileshare", bad)
    with pytest.raises(ManifestError, match="volumes"):
        load_manifest(path)
def test_ports_wrong_type(tmp_path):
    # Ports must be integers, not numeric strings.
    bad = dict(VALID_MANIFEST, ports=["445"])
    path = _write_app(tmp_path, "fileshare", bad)
    with pytest.raises(ManifestError, match="ports"):
        load_manifest(path)
def test_settings_optional_default_empty(tmp_path):
    # Optional fields default to empty values when absent from the payload.
    path = _write_app(tmp_path, "fileshare", VALID_MANIFEST)
    m = load_manifest(path)
    assert m.settings == ()
    assert m.description_long == ""
    assert m.open_url == ""
def test_open_url_stored_when_present(tmp_path):
    # open_url is stored verbatim, placeholders included.
    payload = dict(VALID_MANIFEST, open_url="smb://{host}/files")
    path = _write_app(tmp_path, "fileshare", payload)
    m = load_manifest(path)
    assert m.open_url == "smb://{host}/files"
def test_open_url_non_string_rejected(tmp_path):
    payload = dict(VALID_MANIFEST, open_url=42)
    path = _write_app(tmp_path, "fileshare", payload)
    with pytest.raises(ManifestError, match="open_url"):
        load_manifest(path)
def test_settings_parsed(tmp_path):
    # Settings entries become typed objects; an absent default stays None.
    payload = dict(
        VALID_MANIFEST,
        description_long="Long description with details.",
        settings=[
            {
                "name": "SMB_USER",
                "label": "Benutzername",
                "description": "Anmeldename",
                "type": "text",
                "default": "furtka",
                "required": True,
            },
            {"name": "SMB_PASSWORD", "label": "Passwort", "type": "password", "required": True},
        ],
    )
    path = _write_app(tmp_path, "fileshare", payload)
    m = load_manifest(path)
    assert m.description_long == "Long description with details."
    assert len(m.settings) == 2
    assert m.settings[0].name == "SMB_USER"
    assert m.settings[0].label == "Benutzername"
    assert m.settings[0].default == "furtka"
    assert m.settings[0].type == "text"
    assert m.settings[0].required is True
    assert m.settings[1].type == "password"
    assert m.settings[1].default is None
def test_settings_reject_lowercase_name(tmp_path):
    # Setting names must be UPPER_SNAKE_CASE env-style keys.
    bad = dict(VALID_MANIFEST, settings=[{"name": "smb_user", "type": "text"}])
    path = _write_app(tmp_path, "fileshare", bad)
    with pytest.raises(ManifestError, match="UPPER_SNAKE_CASE"):
        load_manifest(path)
def test_settings_reject_unknown_type(tmp_path):
    bad = dict(VALID_MANIFEST, settings=[{"name": "FOO", "type": "email"}])
    path = _write_app(tmp_path, "fileshare", bad)
    with pytest.raises(ManifestError, match="type must be one of"):
        load_manifest(path)
def test_settings_accept_path_type(tmp_path):
    # "path" is a valid settings type alongside text/password.
    payload = dict(
        VALID_MANIFEST,
        settings=[
            {
                "name": "MEDIA_PATH",
                "label": "Medienordner",
                "description": "Absoluter Pfad zu deinen Medien",
                "type": "path",
                "required": True,
            }
        ],
    )
    path = _write_app(tmp_path, "fileshare", payload)
    m = load_manifest(path)
    assert len(m.settings) == 1
    assert m.settings[0].name == "MEDIA_PATH"
    assert m.settings[0].type == "path"
    assert m.settings[0].required is True
def test_settings_reject_duplicate_name(tmp_path):
    bad = dict(
        VALID_MANIFEST,
        settings=[{"name": "FOO", "type": "text"}, {"name": "FOO", "type": "password"}],
    )
    path = _write_app(tmp_path, "fileshare", bad)
    with pytest.raises(ManifestError, match="duplicate"):
        load_manifest(path)
def test_settings_non_list_rejected(tmp_path):
    bad = dict(VALID_MANIFEST, settings={"FOO": "bar"})
    path = _write_app(tmp_path, "fileshare", bad)
    with pytest.raises(ManifestError, match="settings must be a list"):
        load_manifest(path)

View file

@ -1,74 +0,0 @@
"""Tests for furtka.passwd — stdlib-only password hashing.
The primary contract: hash/verify roundtrips cleanly, AND the verifier
accepts the werkzeug hash format that 26.11 / 26.12 boxes wrote to
``users.json``. Losing that backward compat would lock out existing
admins after a 26.13+ upgrade.
"""
from __future__ import annotations
from furtka import passwd
def test_hash_roundtrip():
    # hash/verify agree on the correct password and reject a wrong one.
    h = passwd.hash_password("hunter2")
    assert passwd.verify_password("hunter2", h)
    assert not passwd.verify_password("wrong", h)
def test_hash_is_salted():
    # Two separate hashes of the same password must diverge.
    a = passwd.hash_password("same-pw")
    b = passwd.hash_password("same-pw")
    assert a != b
    assert passwd.verify_password("same-pw", a)
    assert passwd.verify_password("same-pw", b)
def test_generated_hash_format():
    # Shape is pbkdf2:<hash>:<iter>$<salt>$<hex>
    h = passwd.hash_password("x")
    parts = h.split("$", 2)
    assert len(parts) == 3
    method, salt, digest = parts
    assert method.startswith("pbkdf2:sha256:")
    assert salt
    # digest is hex of pbkdf2_hmac sha256 → 64 hex chars
    assert len(digest) == 64
    assert all(c in "0123456789abcdef" for c in digest)
def test_verify_werkzeug_scrypt_hash():
    """Known werkzeug scrypt hash generated by 26.11 / 26.12 boxes.
    Captured live off a .196 test VM after its auth bootstrap:
    username=daniel, password=test-admin-pw1
    Hash format: scrypt:32768:8:1$<salt>$<hex>
    If this regresses, every existing box that upgraded via 26.11 and
    set a password gets locked out on the next upgrade.
    """
    known = (
        "scrypt:32768:8:1$yWZUqJodowt9ieI1$"
        "2d1059b3564da7492b4aa3c2be7fff6fef06085e5e1bfd52e897948c58246b7a"
        "9603400355b7264f61c4436eba7bf8c947adec3d7a76be03b50efb4227e15a80"
    )
    assert passwd.verify_password("test-admin-pw1", known)
    assert not passwd.verify_password("wrong-password", known)
def test_verify_rejects_malformed_hashes():
    # Empty / missing delimiters / unknown method / bad int — all False.
    assert not passwd.verify_password("x", "")
    assert not passwd.verify_password("x", "nothingspecial")
    assert not passwd.verify_password("x", "pbkdf2:sha256:600000")  # no $salt$digest
    assert not passwd.verify_password("x", "pbkdf2$salt$digest")  # missing hash + iter
    assert not passwd.verify_password("x", "bcrypt:12$salt$digest")  # unsupported algo
    assert not passwd.verify_password("x", "pbkdf2:sha256:abc$salt$digest")  # bad iter int
def test_verify_rejects_nonstring_inputs():
    # Defensive: users.json can be corrupted or have nulls.
    assert not passwd.verify_password(None, "pbkdf2:sha256:1000$salt$digest")  # type: ignore[arg-type]
    assert not passwd.verify_password("x", None)  # type: ignore[arg-type]
    assert not passwd.verify_password("x", 12345)  # type: ignore[arg-type]

View file

@ -1,135 +0,0 @@
import json
import pytest
from furtka import dockerops, reconciler
# Baseline manifest used by the reconciler tests (note: two volumes).
VALID_MANIFEST = {
    "name": "fileshare",
    "display_name": "Network Files",
    "version": "0.1.0",
    "description": "SMB share",
    "volumes": ["files", "config"],
    "ports": [445],
    "icon": "icon.svg",
}
@pytest.fixture
def fake_docker(monkeypatch):
    """Replace dockerops with no-op recorders so reconcile() doesn't shell out."""
    # Every call is recorded per dockerops function; `existing_volumes` makes
    # fake_ensure return True only the first time a volume name is seen.
    calls: dict[str, list] = {"ensure_volume": [], "compose_up": [], "compose_down": []}
    existing_volumes: set[str] = set()
    def fake_ensure(name):
        calls["ensure_volume"].append(name)
        if name in existing_volumes:
            return False
        existing_volumes.add(name)
        return True
    def fake_compose_up(app_dir, project):
        calls["compose_up"].append((str(app_dir), project))
    def fake_compose_down(app_dir, project):
        calls["compose_down"].append((str(app_dir), project))
    monkeypatch.setattr(dockerops, "ensure_volume", fake_ensure)
    monkeypatch.setattr(dockerops, "compose_up", fake_compose_up)
    monkeypatch.setattr(dockerops, "compose_down", fake_compose_down)
    return calls
def _make_app(root, name, manifest=None):
app = root / name
app.mkdir(parents=True)
if manifest is not None:
(app / "manifest.json").write_text(json.dumps(manifest))
(app / "docker-compose.yaml").write_text("services: {}\n")
return app
def test_reconcile_empty_root(tmp_path, fake_docker):
    # Nothing installed: no actions, no docker calls.
    actions = reconciler.reconcile(tmp_path)
    assert actions == []
    assert fake_docker["ensure_volume"] == []
    assert fake_docker["compose_up"] == []
def test_reconcile_one_app(tmp_path, fake_docker):
    # One healthy app: namespaced volumes are ensured, then compose_up runs.
    _make_app(tmp_path, "fileshare", VALID_MANIFEST)
    actions = reconciler.reconcile(tmp_path)
    assert [(a.kind, a.target) for a in actions] == [
        ("ensure_volume", "furtka_fileshare_files"),
        ("ensure_volume", "furtka_fileshare_config"),
        ("compose_up", "fileshare"),
    ]
    assert fake_docker["ensure_volume"] == [
        "furtka_fileshare_files",
        "furtka_fileshare_config",
    ]
    assert len(fake_docker["compose_up"]) == 1
    assert fake_docker["compose_up"][0][1] == "fileshare"
def test_reconcile_dry_run_does_not_act(tmp_path, fake_docker):
    # dry_run reports the planned actions but performs no docker calls.
    _make_app(tmp_path, "fileshare", VALID_MANIFEST)
    actions = reconciler.reconcile(tmp_path, dry_run=True)
    assert len(actions) == 3
    assert fake_docker["ensure_volume"] == []
    assert fake_docker["compose_up"] == []
def test_reconcile_skips_broken_manifest(tmp_path, fake_docker):
    # An app folder without a manifest yields a "skip" action only.
    _make_app(tmp_path, "fileshare", VALID_MANIFEST)
    _make_app(tmp_path, "broken")  # no manifest
    actions = reconciler.reconcile(tmp_path)
    skip_actions = [a for a in actions if a.kind == "skip"]
    assert len(skip_actions) == 1
    assert skip_actions[0].target == "broken"
    # Healthy app still got reconciled.
    assert fake_docker["compose_up"] == [(str(tmp_path / "fileshare"), "fileshare")]
def test_reconcile_isolates_per_app_docker_failure(tmp_path, monkeypatch):
    """A docker error on one app must not block reconcile of the others."""
    _make_app(tmp_path, "alpha", dict(VALID_MANIFEST, name="alpha", volumes=["a"]))
    _make_app(tmp_path, "broken-app", dict(VALID_MANIFEST, name="broken-app", volumes=["b"]))
    _make_app(tmp_path, "zulu", dict(VALID_MANIFEST, name="zulu", volumes=["z"]))
    succeeded_compose: list[str] = []
    def fake_ensure(name):
        return True
    def fake_compose_up(app_dir, project):
        # Simulate the docker daemon failing for exactly one project.
        if project == "broken-app":
            raise dockerops.DockerError("simulated daemon failure")
        succeeded_compose.append(project)
    monkeypatch.setattr(dockerops, "ensure_volume", fake_ensure)
    monkeypatch.setattr(dockerops, "compose_up", fake_compose_up)
    actions = reconciler.reconcile(tmp_path)
    error_actions = [a for a in actions if a.kind == "error"]
    assert reconciler.has_errors(actions)
    assert len(error_actions) == 1
    assert error_actions[0].target == "broken-app"
    # Both healthy apps reconciled despite the broken one in the middle.
    assert succeeded_compose == ["alpha", "zulu"]
def test_reconcile_isolates_missing_docker_binary(tmp_path, monkeypatch):
    """No docker binary on the box still produces a tidy per-app error."""
    _make_app(tmp_path, "fileshare", VALID_MANIFEST)
    def boom(*args, **kwargs):
        raise FileNotFoundError("[Errno 2] No such file or directory: 'docker'")
    monkeypatch.setattr(dockerops, "ensure_volume", boom)
    actions = reconciler.reconcile(tmp_path)
    assert reconciler.has_errors(actions)
    error = next(a for a in actions if a.kind == "error")
    assert error.target == "fileshare"
    assert "docker" in error.detail

View file

@ -1,69 +0,0 @@
import json
from furtka.scanner import scan
# Baseline manifest payload for the scanner tests.
VALID_MANIFEST = {
    "name": "fileshare",
    "display_name": "Network Files",
    "version": "0.1.0",
    "description": "SMB share",
    "volumes": ["files"],
    "ports": [445],
    "icon": "icon.svg",
}
def _make_app(root, name, manifest=None):
app = root / name
app.mkdir(parents=True)
if manifest is not None:
(app / "manifest.json").write_text(json.dumps(manifest))
return app
def test_scan_missing_root(tmp_path):
    # A nonexistent root scans as empty rather than raising.
    assert scan(tmp_path / "does-not-exist") == []
def test_scan_empty_root(tmp_path):
    assert scan(tmp_path) == []
def test_scan_valid_app(tmp_path):
    # Valid app: one ok result with the parsed manifest attached.
    _make_app(tmp_path, "fileshare", VALID_MANIFEST)
    results = scan(tmp_path)
    assert len(results) == 1
    assert results[0].ok
    assert results[0].manifest.name == "fileshare"
def test_scan_skips_files(tmp_path):
    # Plain files in the apps root are ignored; only directories count.
    _make_app(tmp_path, "fileshare", VALID_MANIFEST)
    (tmp_path / "stray.txt").write_text("ignore me")
    results = scan(tmp_path)
    assert len(results) == 1
def test_scan_missing_manifest(tmp_path):
    # A folder without manifest.json is reported as a not-ok result.
    _make_app(tmp_path, "broken")
    results = scan(tmp_path)
    assert len(results) == 1
    assert not results[0].ok
    assert "manifest.json missing" in results[0].error
def test_scan_invalid_manifest(tmp_path):
    # Manifest validation errors surface in the result's error text.
    bad = dict(VALID_MANIFEST)
    del bad["volumes"]
    _make_app(tmp_path, "fileshare", bad)
    results = scan(tmp_path)
    assert len(results) == 1
    assert not results[0].ok
    assert "volumes" in results[0].error
def test_scan_sorted_by_name(tmp_path):
    # Results come back sorted by folder name regardless of creation order.
    _make_app(tmp_path, "z-app", dict(VALID_MANIFEST, name="z-app"))
    _make_app(tmp_path, "a-app", dict(VALID_MANIFEST, name="a-app"))
    results = scan(tmp_path)
    assert [r.path.name for r in results] == ["a-app", "z-app"]

View file

@ -1,108 +0,0 @@
"""Tests for the catalog > bundled resolver."""
from __future__ import annotations
import json
from pathlib import Path
import pytest
def _manifest(name: str = "fileshare") -> dict:
return {
"name": name,
"display_name": "Fileshare",
"version": "0.1.0",
"description": "x",
"volumes": [],
"ports": [],
"icon": "icon.svg",
}
@pytest.fixture
def sources_mod(tmp_path, monkeypatch):
    """Yield furtka.sources reloaded with catalog/bundled dirs under tmp_path."""
    monkeypatch.setenv("FURTKA_CATALOG_DIR", str(tmp_path / "catalog"))
    monkeypatch.setenv("FURTKA_BUNDLED_APPS_DIR", str(tmp_path / "bundled"))
    import importlib
    from furtka import paths as p
    from furtka import sources as s
    # Reload both modules so module-level path constants re-read the env vars.
    importlib.reload(p)
    importlib.reload(s)
    return s
def _seed_app(root: Path, name: str, manifest: dict | None = None) -> Path:
folder = root / name
folder.mkdir(parents=True)
(folder / "manifest.json").write_text(json.dumps(manifest or _manifest(name)))
return folder
def test_resolve_app_name_returns_none_when_absent(sources_mod):
    # Unknown names resolve to None, not an exception.
    assert sources_mod.resolve_app_name("nope") is None
def test_resolve_app_name_prefers_catalog_over_bundled(sources_mod, tmp_path):
    # When both sources have the app, the catalog copy wins.
    _seed_app(tmp_path / "catalog" / "apps", "fileshare")
    _seed_app(tmp_path / "bundled", "fileshare")
    result = sources_mod.resolve_app_name("fileshare")
    assert result is not None
    assert result.origin == "catalog"
    assert result.path.parent.name == "apps"
    assert result.path.parent.parent.name == "catalog"
def test_resolve_app_name_falls_back_to_bundled(sources_mod, tmp_path):
    # No catalog entry: the bundled copy is returned with origin "bundled".
    _seed_app(tmp_path / "bundled", "fileshare")
    result = sources_mod.resolve_app_name("fileshare")
    assert result is not None
    assert result.origin == "bundled"
def test_resolve_app_name_ignores_folder_without_manifest(sources_mod, tmp_path):
    # Empty folder is not a valid app even if the name matches.
    (tmp_path / "catalog" / "apps" / "fileshare").mkdir(parents=True)
    _seed_app(tmp_path / "bundled", "fileshare")
    result = sources_mod.resolve_app_name("fileshare")
    # Catalog entry without manifest is skipped; bundled wins.
    assert result.origin == "bundled"
def test_list_available_unions_catalog_and_bundled(sources_mod, tmp_path):
    # Distinct names from both sources appear once each with their origin.
    _seed_app(tmp_path / "catalog" / "apps", "fileshare")
    _seed_app(tmp_path / "bundled", "otherapp")
    names = {s.path.name: s.origin for s in sources_mod.list_available()}
    assert names == {"fileshare": "catalog", "otherapp": "bundled"}
def test_list_available_catalog_wins_on_collision(sources_mod, tmp_path):
    # The same name in both sources collapses to a single catalog entry.
    _seed_app(tmp_path / "catalog" / "apps", "fileshare")
    _seed_app(tmp_path / "bundled", "fileshare")
    entries = sources_mod.list_available()
    assert len(entries) == 1
    assert entries[0].origin == "catalog"
def test_list_available_empty_when_neither_exists(sources_mod):
    assert sources_mod.list_available() == []
def test_list_available_skips_non_dirs_and_no_manifest(sources_mod, tmp_path):
    # A plain file in catalog/apps and an empty dir in bundled — both ignored.
    cat_root = tmp_path / "catalog" / "apps"
    cat_root.mkdir(parents=True)
    (cat_root / "not-a-dir.txt").write_text("x")
    (tmp_path / "bundled" / "emptyapp").mkdir(parents=True)
    _seed_app(tmp_path / "bundled", "realapp")
    entries = sources_mod.list_available()
    assert [e.path.name for e in entries] == ["realapp"]

View file

@ -1,462 +0,0 @@
"""Tests for the Phase-2 self-update logic.
These tests exercise the pure logic in furtka/updater.py: version compare,
sha256 verify, tarball extract, symlink swap, state writes. The service-
restart + health-check paths are stubbed so tests don't talk to systemd or
hit the network.
FURTKA_ROOT + FURTKA_STATE_DIR + FURTKA_LOCK_PATH all override to tmp_path
so each test gets a clean filesystem.
"""
import hashlib
import io
import tarfile
from pathlib import Path
import pytest
@pytest.fixture
def updater(tmp_path, monkeypatch):
    """Yield furtka.updater reloaded with all filesystem paths under tmp_path."""
    monkeypatch.setenv("FURTKA_ROOT", str(tmp_path / "opt_furtka"))
    monkeypatch.setenv("FURTKA_STATE_DIR", str(tmp_path / "var_lib_furtka"))
    monkeypatch.setenv("FURTKA_LOCK_PATH", str(tmp_path / "update.lock"))
    monkeypatch.setenv("FURTKA_CADDYFILE_PATH", str(tmp_path / "etc_caddy" / "Caddyfile"))
    monkeypatch.setenv("FURTKA_SYSTEMD_DIR", str(tmp_path / "etc_systemd_system"))
    # Fake hostname file — presumably read for Caddyfile hostname
    # substitution (see the substitution tests below).
    hostname_file = tmp_path / "etc_hostname"
    hostname_file.write_text("testbox\n")
    monkeypatch.setenv("FURTKA_HOSTNAME_FILE", str(hostname_file))
    (tmp_path / "etc_systemd_system").mkdir()
    # Reload the module so the path constants pick up the env vars.
    import importlib
    import furtka.updater as u
    importlib.reload(u)
    return u
def _make_tarball(path: Path, version: str):
"""Build a minimal valid Furtka release tarball at `path`."""
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w:gz") as tf:
for name, content in [
("VERSION", f"{version}\n"),
("furtka/__init__.py", ""),
("apps/fileshare/manifest.json", "{}"),
]:
data = content.encode()
info = tarfile.TarInfo(name=name)
info.size = len(data)
tf.addfile(info, io.BytesIO(data))
path.write_bytes(buf.getvalue())
def test_version_tuple_orders_prereleases_before_stable(updater):
    # alpha < beta < rc < stable within a release; then numeric ordering.
    vt = updater._version_tuple
    assert vt("26.0-alpha") < vt("26.0-beta")
    assert vt("26.0-beta") < vt("26.0-rc1")
    assert vt("26.0-rc1") < vt("26.0")
    assert vt("26.0") < vt("26.1-alpha")
    assert vt("26.1-alpha") < vt("27.0-alpha")
def test_verify_tarball_accepts_matching_hash(tmp_path, updater):
    tar = tmp_path / "t.tar.gz"
    tar.write_bytes(b"hello world")
    sha = hashlib.sha256(b"hello world").hexdigest()
    updater.verify_tarball(tar, sha)  # no raise
def test_verify_tarball_rejects_mismatch(tmp_path, updater):
    tar = tmp_path / "t.tar.gz"
    tar.write_bytes(b"hello world")
    with pytest.raises(updater.UpdateError, match="sha256 mismatch"):
        updater.verify_tarball(tar, "0" * 64)
def test_parse_sha256_sidecar_strips_filename(updater):
    # Only the leading hex field of the "<hex> <filename>" sidecar is returned.
    line = "abc123 furtka-26.1-alpha.tar.gz\n"
    assert updater._parse_sha256_sidecar(line) == "abc123"
def test_parse_sha256_sidecar_rejects_empty(updater):
    with pytest.raises(updater.UpdateError):
        updater._parse_sha256_sidecar("")
def test_extract_tarball_returns_version_and_refuses_unsafe_paths(tmp_path, updater):
    # Happy path: extraction returns the VERSION member's content.
    tar = tmp_path / "t.tar.gz"
    _make_tarball(tar, "26.2-alpha")
    dest = tmp_path / "dest"
    assert updater._extract_tarball(tar, dest) == "26.2-alpha"
    assert (dest / "VERSION").read_text().strip() == "26.2-alpha"
    # Build a malicious tarball with a traversal path; must refuse.
    evil = tmp_path / "evil.tar.gz"
    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode="w:gz") as tf:
        info = tarfile.TarInfo(name="../escape")
        info.size = 0
        tf.addfile(info, io.BytesIO(b""))
    evil.write_bytes(buf.getvalue())
    with pytest.raises(updater.UpdateError, match="refusing"):
        updater._extract_tarball(evil, tmp_path / "dest2")
def test_write_and_read_state_round_trip(updater):
    # State round-trips stage + extra kwargs and stamps an updated_at field.
    updater.write_state("downloading", latest="26.2-alpha")
    s = updater.read_state()
    assert s["stage"] == "downloading"
    assert s["latest"] == "26.2-alpha"
    assert "updated_at" in s
def _make_release_tarball(path: Path, version: str, caddyfile_body: str = "# caddy\n"):
"""Richer tarball with assets/Caddyfile + assets/systemd/ — enough for
apply_update's post-swap integration (caddy refresh, unit linking)."""
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w:gz") as tf:
for name, content in [
("VERSION", f"{version}\n"),
("furtka/__init__.py", ""),
("apps/fileshare/manifest.json", "{}"),
("assets/Caddyfile", caddyfile_body),
("assets/systemd/furtka-api.service", "[Service]\nExecStart=/bin/true\n"),
]:
data = content.encode()
info = tarfile.TarInfo(name=name)
info.size = len(data)
tf.addfile(info, io.BytesIO(data))
path.write_bytes(buf.getvalue())
def test_apply_update_happy_path(tmp_path, updater, monkeypatch):
# Set up an existing "26.0-alpha" current symlink so apply_update has
# something to swap out.
versions = updater.versions_dir()
versions.mkdir(parents=True)
(versions / "26.0-alpha").mkdir()
(versions / "26.0-alpha" / "VERSION").write_text("26.0-alpha\n")
current = updater.current_symlink()
current.symlink_to(versions / "26.0-alpha")
tar = tmp_path / "t.tar.gz"
_make_release_tarball(tar, "26.1-alpha", caddyfile_body="# new caddy config\n")
# Stub the shell-out + health check — both succeed.
monkeypatch.setattr(updater, "_run", lambda cmd: None)
monkeypatch.setattr(updater, "_health_check", lambda url, deadline_s=30.0: True)
updater.apply_update(tar, "26.1-alpha")
assert current.resolve() == (versions / "26.1-alpha").resolve()
assert (versions / "26.1-alpha" / "VERSION").read_text().strip() == "26.1-alpha"
# Version dir is 755 so Caddy can traverse it — the staging dir came
# from mktemp-ish extractall which defaults to 700, and carries through
# the rename unless we explicitly chmod.
assert (versions / "26.1-alpha").stat().st_mode & 0o777 == 0o755
# P1-2: Caddyfile was copied into /etc/caddy/ from the new version.
assert updater._CADDYFILE_LIVE.read_text() == "# new caddy config\n"
state = updater.read_state()
assert state["stage"] == "done"
assert state["version"] == "26.1-alpha"
def test_apply_update_rolls_back_on_health_check_failure(tmp_path, updater, monkeypatch):
    versions = updater.versions_dir()
    versions.mkdir(parents=True)
    old_slot = versions / "26.0-alpha"
    old_slot.mkdir()
    (old_slot / "VERSION").write_text("26.0-alpha\n")
    current = updater.current_symlink()
    current.symlink_to(old_slot)
    tarball = tmp_path / "t.tar.gz"
    _make_tarball(tarball, "26.1-alpha")
    # The restart "works" but the API never reports healthy again.
    monkeypatch.setattr(updater, "_run", lambda cmd: None)
    monkeypatch.setattr(updater, "_health_check", lambda url, deadline_s=30.0: False)
    # Rollback shells out through subprocess.run directly — stub it too so
    # no real service gets restarted.
    import subprocess
    monkeypatch.setattr(subprocess, "run", lambda *a, **kw: None)
    with pytest.raises(updater.UpdateError, match="rolled back"):
        updater.apply_update(tarball, "26.1-alpha")
    # The symlink must have been flipped back to 26.0-alpha.
    assert current.resolve() == old_slot.resolve()
    state = updater.read_state()
    assert state["stage"] == "rolled_back"
    assert state["failed_version"] == "26.1-alpha"
def test_refresh_caddyfile_copies_when_different(updater, tmp_path):
    # Nothing in /etc/caddy/ yet, so the source content wins.
    source = tmp_path / "src"
    source.write_text("# new\n")
    assert updater._refresh_caddyfile(source) is True
    assert updater._CADDYFILE_LIVE.read_text() == "# new\n"
    # Identical content on the second pass — nothing to do.
    assert updater._refresh_caddyfile(source) is False
def test_refresh_caddyfile_noops_if_source_missing(updater, tmp_path):
    # A missing source file must be a quiet no-op, not an exception.
    missing = tmp_path / "does-not-exist"
    assert updater._refresh_caddyfile(missing) is False
def test_refresh_caddyfile_substitutes_hostname_placeholder(updater, tmp_path):
    # Self-update re-renders the shipped Caddyfile against the box's real
    # hostname — the same substitution the installer performs on first
    # boot. Skipping it would ship a literal `__FURTKA_HOSTNAME__` in the
    # named-hostname :443 block and Caddy would refuse the config.
    source = tmp_path / "src"
    source.write_text("__FURTKA_HOSTNAME__.local, __FURTKA_HOSTNAME__ {\n\ttls internal\n}\n")
    assert updater._refresh_caddyfile(source) is True
    rendered = updater._CADDYFILE_LIVE.read_text()
    assert "testbox.local, testbox {" in rendered
    assert "__FURTKA_HOSTNAME__" not in rendered
    # Re-running with the same source no-ops: rendered output already matches.
    assert updater._refresh_caddyfile(source) is False
def test_health_check_treats_4xx_as_healthy(updater, monkeypatch):
    """26.11+ auth makes /api/apps return 401 on unauth requests. If the
    health check treated that as "down", every pre-auth auth upgrade
    auto-rolls back. Server responding at all is enough signal for the
    health check."""
    # NOTE(review): the previous version also defined an unused _FakeResp
    # context-manager class — dead code, removed.
    import urllib.error
    calls = {"n": 0}
    def raising_401(url, timeout):
        calls["n"] += 1
        raise urllib.error.HTTPError(url, 401, "Unauthorized", {}, None)
    monkeypatch.setattr("urllib.request.urlopen", raising_401)
    assert updater._health_check("http://127.0.0.1:7000/api/apps", deadline_s=2.0) is True
    # One call was enough — early exit on 4xx, no retry loop.
    assert calls["n"] == 1
def test_health_check_rejects_5xx(updater, monkeypatch):
    """500s mean the server is up but broken — that's NOT healthy.
    Distinguishes auth refusals (4xx = healthy) from real runtime
    errors (5xx = unhealthy, roll back)."""
    import urllib.error
    def server_error(url, timeout):
        raise urllib.error.HTTPError(url, 500, "Internal Server Error", {}, None)
    monkeypatch.setattr("urllib.request.urlopen", server_error)
    assert updater._health_check("http://127.0.0.1:7000/api/apps", deadline_s=1.5) is False
def test_health_check_retries_on_connection_refused(updater, monkeypatch):
    """While furtka-api is still starting, urlopen raises URLError.
    The loop must keep polling until the server comes up or deadline."""
    import urllib.error
    attempts = {"n": 0}
    class _Resp:
        status = 200
        def __enter__(self):
            return self
        def __exit__(self, *a):
            return False
    def flaky(url, timeout):
        attempts["n"] += 1
        if attempts["n"] < 3:
            raise urllib.error.URLError("connection refused")
        return _Resp()
    monkeypatch.setattr("urllib.request.urlopen", flaky)
    assert updater._health_check("http://127.0.0.1:7000/api/apps", deadline_s=10.0) is True
    # Third attempt succeeded; no extra polls after that.
    assert attempts["n"] == 3
def test_current_hostname_falls_back_when_file_missing(updater, monkeypatch, tmp_path):
    # Point the module at a hostname file that doesn't exist, then reload
    # so the env var is re-read at import time.
    import importlib
    monkeypatch.setenv("FURTKA_HOSTNAME_FILE", str(tmp_path / "missing"))
    importlib.reload(updater)
    assert updater._current_hostname() == "furtka"
def test_link_new_units_only_links_missing(updater, tmp_path, monkeypatch):
    unit_dir = tmp_path / "assets_systemd"
    unit_dir.mkdir()
    (unit_dir / "furtka-foo.service").write_text("[Service]\nExecStart=/bin/true\n")
    (unit_dir / "furtka-bar.timer").write_text("[Timer]\nOnBootSec=1s\n")
    (unit_dir / "ignored.txt").write_text("not a unit")
    # furtka-foo already has a link in the systemd dir — it must be skipped.
    (updater._SYSTEMD_DIR / "furtka-foo.service").symlink_to("/dev/null")
    recorded = []
    monkeypatch.setattr(updater, "_run", lambda cmd: recorded.append(cmd))
    linked = updater._link_new_units(unit_dir)
    assert linked == ["furtka-bar.timer"]
    # Exactly two commands fire for the new timer: systemctl link, then
    # systemctl enable. Timers need the follow-up `enable` so self-updates
    # that introduce new timers don't leave them dormant — fresh installs
    # get their enable via the webinstaller. The pre-linked service is
    # left untouched.
    assert len(recorded) == 2
    assert recorded[0][:2] == ["systemctl", "link"]
    assert recorded[0][2].endswith("furtka-bar.timer")
    assert recorded[1] == ["systemctl", "enable", "furtka-bar.timer"]
def test_extract_tarball_uses_data_filter_when_available(tmp_path, updater, monkeypatch):
    # Confirm filter='data' is passed to extractall on Python 3.12+, with a
    # clean fallback on older runtimes. tarfile lives in
    # furtka._release_common after the extraction refactor, so that module
    # gets the patch — updater._extract_tarball delegates there.
    from furtka import _release_common as _rc
    seen = []
    real_open = _rc.tarfile.open  # grab before the monkeypatch lands
    class _Recorder:
        def __init__(self, tarball):
            self._inner = real_open(tarball, "r:gz")
        def __enter__(self):
            return self
        def __exit__(self, *a):
            self._inner.close()
        def getmembers(self):
            return self._inner.getmembers()
        def extractall(self, *args, **kwargs):
            seen.append(("extractall", args, kwargs))
            # Simulate an older Python: blow up when filter= is supplied,
            # forcing the caller's retry without it.
            if "filter" in kwargs:
                raise TypeError("old python")
            return self._inner.extractall(*args)
    tar = tmp_path / "t.tar.gz"
    _make_release_tarball(tar, "26.9-alpha")
    monkeypatch.setattr(_rc.tarfile, "open", lambda *a, **kw: _Recorder(tar))
    updater._extract_tarball(tar, tmp_path / "dest")
    # First attempt carried filter=; the fallback retry dropped it.
    assert len(seen) == 2
    assert seen[0][2] == {"filter": "data"}
    assert seen[1][2] == {}
def test_apply_update_rejects_version_mismatch(tmp_path, updater, monkeypatch):
    # Tarball says 26.1-alpha, caller expects 26.2-alpha — must refuse.
    updater.versions_dir().mkdir(parents=True)
    tarball = tmp_path / "t.tar.gz"
    _make_tarball(tarball, "26.1-alpha")
    with pytest.raises(updater.UpdateError, match="doesn't match expected"):
        updater.apply_update(tarball, "26.2-alpha")
def test_acquire_lock_prevents_concurrent_runs(tmp_path, updater):
    held = updater.acquire_lock()
    try:
        # A second acquisition while the first is held must fail loudly.
        with pytest.raises(updater.UpdateError, match="already in progress"):
            updater.acquire_lock()
    finally:
        held.close()
def test_read_current_version_falls_back_to_dev(updater):
    # With neither the current symlink nor a VERSION file present, the
    # reader must report "dev" rather than raise.
    assert updater.read_current_version() == "dev"
def test_rollback_flips_to_previous_slot(tmp_path, updater, monkeypatch):
    versions = updater.versions_dir()
    versions.mkdir(parents=True)
    for version in ("26.0-alpha", "26.1-alpha"):
        slot = versions / version
        slot.mkdir()
        (slot / "VERSION").write_text(f"{version}\n")
    current = updater.current_symlink()
    current.symlink_to(versions / "26.1-alpha")
    # Don't restart any real service during the rollback.
    import subprocess
    monkeypatch.setattr(subprocess, "run", lambda *a, **kw: None)
    assert updater.rollback() == "26.0-alpha"
    assert current.resolve() == (versions / "26.0-alpha").resolve()
def test_check_update_queries_forgejo_and_compares(updater, monkeypatch):
    # Stub both the API and the current-version read. Forgejo's /releases
    # list comes back most-recent first (pre-releases included) — the code
    # takes element [0].
    release = {
        "tag_name": "26.1-alpha",
        "assets": [
            {
                "name": "furtka-26.1-alpha.tar.gz",
                "browser_download_url": "https://x/t.tar.gz",
            },
            {
                "name": "furtka-26.1-alpha.tar.gz.sha256",
                "browser_download_url": "https://x/t.tar.gz.sha256",
            },
        ],
    }
    monkeypatch.setattr(updater, "read_current_version", lambda: "26.0-alpha")
    monkeypatch.setattr(updater, "_forgejo_api", lambda path: [release])
    check = updater.check_update()
    assert check.current == "26.0-alpha"
    assert check.latest == "26.1-alpha"
    assert check.update_available is True
    assert check.tarball_url == "https://x/t.tar.gz"
    assert check.sha256_url == "https://x/t.tar.gz.sha256"
def test_check_update_reports_up_to_date_when_same_version(updater, monkeypatch):
    # Latest release tag equals the installed version — no update offered.
    monkeypatch.setattr(updater, "read_current_version", lambda: "26.1-alpha")
    releases = [{"tag_name": "26.1-alpha", "assets": []}]
    monkeypatch.setattr(updater, "_forgejo_api", lambda path: releases)
    assert updater.check_update().update_available is False
def test_check_update_raises_when_no_releases_published(updater, monkeypatch):
    # A brand-new repo with zero releases must surface a clean error the
    # UI can show, not a raw "HTTP 404 Not Found".
    monkeypatch.setattr(updater, "read_current_version", lambda: "26.0-alpha")
    monkeypatch.setattr(updater, "_forgejo_api", lambda path: [])
    with pytest.raises(updater.UpdateError, match="no releases"):
        updater.check_update()

View file

@ -1,261 +0,0 @@
"""Asset sourcing + install-flow tests for webinstaller.
Phase-2 slice 1a moved every HTML/CSS/script/unit payload out of inline
Python string constants and into real files under furtka/assets/. Slice 1b
then flipped Caddy to serve from /opt/furtka/current/assets/www/, retired
/srv/furtka/www/, and switched systemd units from hand-written files in
/etc/systemd/system/ to `systemctl link` against the shipped asset tree.
These tests lock the new contract:
- the Caddyfile and the status.json placeholder still land via
_write_file_cmd and match the on-disk asset bit-for-bit,
- the resource-manager bootstrap extracts to /opt/furtka/versions/<ver>/,
creates /opt/furtka/current, and systemctl-links every unit,
- furtka.json is written with the installer's hostname,
- no write ever targets /srv/furtka/www/ (that path is retired).
"""
import base64
import re
import sys
from pathlib import Path
import pytest
sys.path.insert(0, str(Path(__file__).resolve().parent.parent / "webinstaller"))
import app # noqa: E402
REPO_ROOT = Path(__file__).resolve().parent.parent
ASSETS = REPO_ROOT / "assets"
# (install target path, asset path under furtka/assets/) — only the files we
# still copy bit-for-bit at install time. Scripts + unit files are no longer
# copied; they're reached via /opt/furtka/current and `systemctl link`. The
# Caddyfile is not in this list because it's written with the hostname
# placeholder substituted — see test_post_install_substitutes_hostname_in_caddyfile.
ASSET_TARGETS = [
("/var/lib/furtka/status.json", "www/status.json"),
]
def _extract_written_content(cmd, target):
"""Pull the base64 payload back out of a _write_file_cmd() shell string."""
match = re.search(r"printf %s (\S+) \| base64 -d > " + re.escape(target), cmd)
assert match, f"cmd didn't look like a write of {target}: {cmd[:100]}"
return base64.b64decode(match.group(1)).decode("utf-8")
@pytest.fixture
def install_cmds(tmp_path, monkeypatch):
    # Point RESOURCE_MANAGER_PAYLOAD at a throwaway file so
    # _resource_manager_commands returns a non-empty list and the full
    # command tree gets generated. Content is irrelevant: these tests only
    # inspect the generated shell strings, never extract the tarball.
    fake_payload = tmp_path / "payload.tar.gz"
    fake_payload.write_bytes(b"not a real tarball")
    monkeypatch.setattr(app, "RESOURCE_MANAGER_PAYLOAD", fake_payload)
    return app._post_install_commands("testhost", "daniel", "test-admin-pw")
@pytest.mark.parametrize("target,asset_relpath", ASSET_TARGETS)
def test_post_install_writes_asset_from_disk(install_cmds, target, asset_relpath):
    # The installed file must match the repo asset bit-for-bit.
    writes = [c for c in install_cmds if f" > {target}" in c]
    assert writes, f"no command writes {target}"
    expected = (ASSETS / asset_relpath).read_text(encoding="utf-8")
    assert _extract_written_content(writes[0], target) == expected
def test_no_command_writes_to_retired_srv_path(install_cmds):
    # /srv/furtka/www is retired — nothing may touch it.
    offenders = [c for c in install_cmds if "/srv/furtka/www" in c]
    assert not offenders, f"retired /srv/furtka/www write: {offenders[0][:120] if offenders else ''}"
def test_resource_manager_extracts_to_versioned_slot(install_cmds):
    candidates = [c for c in install_cmds if "tar -xzf" in c]
    assert candidates, "no tar extract command"
    extract_cmd = candidates[0]
    assert "/opt/furtka/versions" in extract_cmd
    assert "staging-" in extract_cmd  # mktemp -d pattern
    assert 'cat "$staging/VERSION"' in extract_cmd
    # An empty VERSION file has to abort the install — otherwise the
    # staging dir silently becomes a subdir of versions/.
    assert '[ -n "$ver" ]' in extract_cmd
    # Version dir must be 755 so Caddy (non-root) can traverse it.
    assert 'chmod 755 "/opt/furtka/versions/$ver"' in extract_cmd
    assert 'ln -sfn "/opt/furtka/versions/$ver" /opt/furtka/current' in extract_cmd
def test_furtka_json_cmd_uses_heredoc_and_interpolates_hostname(install_cmds):
    # Regression guard: the old base64+sed form of this command was a
    # silent no-op on some installs thanks to archinstall-side quoting.
    # Only the heredoc variant reliably writes furtka.json.
    cmd = next((c for c in install_cmds if "/var/lib/furtka/furtka.json" in c), None)
    assert cmd is not None
    for needle in (
        "cat > /var/lib/furtka/furtka.json <<EOF",
        '"hostname": "testhost"',
        "date -Iseconds",
        "/opt/furtka/current/VERSION",
    ):
        assert needle in cmd
def test_resource_manager_systemctl_links_every_unit(install_cmds):
    link_cmd = next((c for c in install_cmds if c.startswith("systemctl link ")), None)
    assert link_cmd is not None, "no systemctl link command"
    # Every shipped unit must be linked from the /current asset tree.
    missing = [
        u for u in app._FURTKA_UNITS
        if f"/opt/furtka/current/assets/systemd/{u}" not in link_cmd
    ]
    assert not missing
def test_resource_manager_enables_all_units(install_cmds):
    enable_cmd = next((c for c in install_cmds if c.startswith("systemctl enable ")), None)
    assert enable_cmd is not None
    assert all(unit in enable_cmd for unit in app._FURTKA_UNITS)
def test_wrapper_script_points_at_current_symlink():
    # The CLI shim must launch against the /current symlink so it keeps
    # working across version swaps.
    wrapper = app._FURTKA_WRAPPER_SH
    assert "PYTHONPATH=/opt/furtka/current" in wrapper
def test_caddyfile_asset_serves_from_current():
    caddy = (ASSETS / "Caddyfile").read_text()
    assert "root * /opt/furtka/current/assets/www" in caddy
    assert "/srv/furtka/www" not in caddy
    # Runtime JSON lives under /var/lib/furtka/ so version swaps can't
    # clobber it.
    assert "root * /var/lib/furtka" in caddy
def _strip_caddy_comments(text: str) -> str:
"""Remove # comments + blank lines so string-match assertions can
target actual Caddyfile directives, not the leading doc block.
Comment intro is ``#`` at start-of-line or preceded by whitespace."""
out = []
for line in text.splitlines():
stripped = line.split("#", 1)[0].rstrip()
if stripped:
out.append(stripped)
return "\n".join(out)
def test_caddyfile_serves_http_by_default_https_opt_in():
    # 26.15-alpha: HTTPS is opt-in. The default Caddyfile carries a :80
    # block and a top-level import of /etc/caddy/furtka-https.d/*.caddyfile;
    # the /settings HTTPS toggle drops the hostname+tls-internal block into
    # that dir only when the user explicitly enables HTTPS. The default
    # file therefore has no `tls internal` directive at all — if a future
    # refactor reintroduces one, every fresh install regresses to the
    # 26.14-era BAD_SIGNATURE trap. Comments get stripped first because
    # the doc block DOES mention `tls internal` in prose.
    caddy = _strip_caddy_comments((ASSETS / "Caddyfile").read_text())
    assert ":80 {" in caddy
    assert "tls internal" not in caddy
    assert "__FURTKA_HOSTNAME__" not in caddy
    assert "import /etc/caddy/furtka-https.d/*.caddyfile" in caddy
    # Shared routes stay in a named snippet so the HTTPS toggle's snippet
    # can import the same routes without duplicating them.
    assert "(furtka_routes)" in caddy
    # The default file imports it exactly once (inside :80); the toggle's
    # snippet adds the second import when HTTPS is enabled.
    assert caddy.count("import furtka_routes") == 1
def test_caddyfile_disables_caddy_auto_redirects():
    # A named-hostname :443 block makes Caddy want its own HTTP→HTTPS
    # redirect. The /settings toggle is the single source of truth, so
    # Caddy's built-in redirect must stay off or the two race.
    caddy = (ASSETS / "Caddyfile").read_text()
    assert "auto_https disable_redirects" in caddy
def test_caddyfile_imports_force_redirect_snippet_dir():
    # /api/furtka/https/force flips HTTP→HTTPS by writing or deleting a
    # snippet file under this dir; the Caddyfile has to glob-import it
    # inside the :80 block for the toggle to have any effect.
    caddy = (ASSETS / "Caddyfile").read_text()
    assert "import /etc/caddy/furtka.d/*.caddyfile" in caddy
def test_caddyfile_exposes_root_ca_download():
    # /rootCA.crt is the UI's download handle. It must map to Caddy's
    # local-CA pki path and set a Content-Disposition so browsers download
    # instead of rendering. The path is the real one Caddy uses under
    # XDG_DATA_HOME=/var/lib (see caddy.service's Environment= directive),
    # not the /var/lib/caddy/.local/share/caddy/ path the Caddy docs show
    # for non-systemd installs.
    caddy = (ASSETS / "Caddyfile").read_text()
    assert "handle /rootCA.crt" in caddy
    assert "/var/lib/caddy/pki/authorities/local" in caddy
    assert ".local/share/caddy" not in caddy
    assert "attachment; filename=furtka-local-rootCA.crt" in caddy
def test_post_install_writes_caddyfile_without_hostname_placeholder(install_cmds):
    # 26.15-alpha: the shipped Caddyfile dropped the __FURTKA_HOSTNAME__
    # marker — hostname + HTTPS now live in the opt-in snippet written by
    # set_force_https(). The post-install therefore writes the file as-is
    # (no substitution expected) and it must carry the opt-in import glob.
    caddyfile_cmd = next((c for c in install_cmds if " > /etc/caddy/Caddyfile" in c), None)
    assert caddyfile_cmd is not None
    raw = _extract_written_content(caddyfile_cmd, "/etc/caddy/Caddyfile")
    written = _strip_caddy_comments(raw)
    assert "__FURTKA_HOSTNAME__" not in written
    assert "import /etc/caddy/furtka-https.d/*.caddyfile" in written
    assert "tls internal" not in written
def test_post_install_creates_https_snippet_dir(install_cmds):
    # The HTTPS opt-in snippet dir must exist before Caddy's first start:
    # its glob import tolerates an empty directory, but on older Caddy
    # builds not a missing one. Parallel guarantee to
    # test_post_install_creates_furtka_d_snippet_dir below.
    hits = [c for c in install_cmds if "/etc/caddy/furtka-https.d" in c and "install -d" in c]
    assert hits, "no install -d command creates /etc/caddy/furtka-https.d"
def test_post_install_creates_furtka_d_snippet_dir(install_cmds):
    # Existing installs get the import path via updater._refresh_caddyfile,
    # but a fresh install never runs that — this command is the only thing
    # guaranteeing the first Caddy start on a brand-new box has a dir to
    # glob-import.
    hits = [c for c in install_cmds if "/etc/caddy/furtka.d" in c and "install -d" in c]
    assert hits, "no install -d command creates /etc/caddy/furtka.d"
def test_systemd_units_reference_current_paths():
    units = ("furtka-status.service", "furtka-welcome.service")
    for unit in units:
        body = (ASSETS / "systemd" / unit).read_text()
        assert "/opt/furtka/current/assets/bin/" in body, (
            f"{unit} still references a /usr/local/bin path"
        )
def test_read_asset_raises_for_missing_file():
    # Missing assets fail loudly at install-command build time, by design.
    with pytest.raises(FileNotFoundError):
        app._read_asset("does/not/exist.html")
def test_assets_dir_resolves_to_repo_tree():
    # In the dev/test layout, _resolve_assets_dir() must land on the
    # repo-root assets/ directory (the ISO-sibling path doesn't exist here).
    assert app._ASSETS_DIR == ASSETS
def test_post_install_writes_users_json_with_hashed_password(install_cmds):
    """The Furtka-admin users.json is created during the chroot post-install.
    Without this, a fresh-install box lands at /login in first-run setup
    mode and the user has to go through the browser to set a password
    which defeats the "step-1 password works for everything" design. Also
    check that the file is chmod 0600 (the PBKDF2 hash is a secret even
    if it's slow to crack).
    """
    import json as _json
    from werkzeug.security import check_password_hash
    users_cmd = next((c for c in install_cmds if " > /var/lib/furtka/users.json" in c), None)
    assert users_cmd is not None, "no command writes /var/lib/furtka/users.json"
    assert "chmod 600" in users_cmd, "users.json must be chmod 0600"
    parsed = _json.loads(_extract_written_content(users_cmd, "/var/lib/furtka/users.json"))
    assert "admin" in parsed
    admin = parsed["admin"]
    assert admin["username"] == "daniel"  # matches fixture
    # The stored hash is a real werkzeug hash, never the plaintext.
    assert admin["hash"] != "test-admin-pw"
    assert check_password_hash(admin["hash"], "test-admin-pw")

View file

@ -1,646 +1,46 @@
# ruff: noqa: E501 — inline HTML/CSS/JS payloads (_INDEX_HTML, _STYLE_CSS,
# _CADDYFILE, _FURTKA_STATUS_SH, etc.) round-trip verbatim to the installed
# system; wrapping them hurts readability and the rendered output is what
# matters.
import base64
import json
import os
import re
import subprocess
import sys
from datetime import UTC
from pathlib import Path
from drives import list_scored_devices from drives import list_scored_devices
from flask import Flask, jsonify, redirect, render_template, request, url_for from flask import Flask, redirect, render_template, request, url_for
app = Flask(__name__) app = Flask(__name__)
def _resolve_version() -> str:
"""Resolve the Furtka version to display in the wizard footer.
On the live ISO `iso/build.sh` writes `/opt/furtka/VERSION` at build time
from `pyproject.toml`; that's the authoritative source at runtime. For
local dev runs (pytest, `flask run` outside the ISO) fall back to
reading `pyproject.toml` directly, then to the literal "dev" so the
footer never 500s if both files are missing.
"""
iso_path = Path(__file__).resolve().parent / "VERSION"
for candidate in (iso_path, Path(__file__).resolve().parent.parent / "pyproject.toml"):
try:
text = candidate.read_text(encoding="utf-8")
except (FileNotFoundError, PermissionError, OSError):
continue
if candidate.name == "VERSION":
value = text.strip()
if value:
return value
else:
match = re.search(r'^version\s*=\s*"([^"]+)"', text, re.MULTILINE)
if match:
return match.group(1)
return "dev"
FURTKA_VERSION = _resolve_version()  # resolved once at import time
@app.context_processor
def _inject_version():
    # Exposes `furtka_version` to every rendered template (footer display).
    return {"furtka_version": FURTKA_VERSION}
# Supported install languages: UI language code → glibc locale, display
# label, and console keyboard layout. validate_step1 checks membership here.
LANGUAGES = {
    "en": {"locale": "en_US.UTF-8", "label": "English", "keyboard": "us"},
    "de": {"locale": "de_DE.UTF-8", "label": "Deutsch", "keyboard": "de"},
    "pl": {"locale": "pl_PL.UTF-8", "label": "Polski", "keyboard": "pl"},
}
# Installer scratch/state area; overridable via FURTKA_STATE_DIR.
STATE_DIR = Path(os.environ.get("FURTKA_STATE_DIR", "/tmp/furtka"))
INSTALL_LOG = STATE_DIR / "install.log"  # presumably fed to parse_install_progress — confirm at call site
CONFIG_PATH = STATE_DIR / "user_configuration.json"
CREDS_PATH = STATE_DIR / "user_credentials.json"
# Pre-populated with sane defaults so the form has something useful on first
# render. POSTs validate and overwrite.
settings = { settings = {
# Step 1
"hostname": "furtka", "hostname": "furtka",
"username": "", "username": "",
"password": "", "password": "",
"password2": "",
"backend": False,
"backend_adress": "127.0.0.1",
"language": "en", "language": "en",
"boot_drive": "", # devices
"boot_drive_uuid": "",
} }
# Hostname label: lowercase alphanumerics with interior hyphens, 1-63 chars.
HOSTNAME_RE = re.compile(r"^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$")
# Unix-style username: leading lowercase letter or underscore, then
# lowercase letters/digits/underscore/hyphen, max 32 chars total.
USERNAME_RE = re.compile(r"^[a-z_][a-z0-9_-]{0,31}$")
# Ordered phase markers for the install progress bar. Each tuple is
# (substring to search for in the archinstall log, progress percent when
# reached, user-facing label). We pick the furthest phase whose marker is
# present in the log. If archinstall changes its stdout wording the bar
# stalls on the last recognized phase — the install itself keeps going.
PROGRESS_PHASES = [
    ("Wiping partitions", 8, "Preparing your disk"),
    ("Creating partitions", 12, "Creating partitions"),
    ("Starting installation", 15, "Starting installation"),
    ("Waiting for", 18, "Syncing time and packages"),
    ("Installing packages: ['base'", 25, "Installing the base system (this takes a while)"),
    ("Adding bootloader", 65, "Setting up boot"),
    ("Installing packages: ['efibootmgr'", 70, "Setting up boot"),
    ("Installing packages: ['docker'", 80, "Installing your apps"),
    ("Enabling service", 90, "Turning on services"),
    ("Updating /mnt/etc/fstab", 95, "Almost done"),
    ("Installation completed without any errors", 100, "Done!"),
]
PROGRESS_ERROR_MARKERS = ("Traceback (most recent call last)", "archinstall: error:")
def parse_install_progress(log):
    """Map the archinstall log text to {percent, phase, status}.

    Scans PROGRESS_PHASES in order and keeps the furthest matching phase;
    status is "done" at 100%, "error" when an error marker appears before
    completion, otherwise "running".
    """
    percent, phase = 2, "Starting up…"
    for marker, pct, label in PROGRESS_PHASES:
        if marker in log:
            percent, phase = pct, label
    if percent >= 100:
        return {"percent": percent, "phase": phase, "status": "done"}
    if any(m in log for m in PROGRESS_ERROR_MARKERS):
        return {
            "percent": percent,
            "phase": "Installation failed — open Show details below",
            "status": "error",
        }
    return {"percent": percent, "phase": phase, "status": "running"}
def validate_step1(form):
    """Validate the step-1 form; returns (errors, cleaned_values).

    `errors` is a list of user-facing messages (empty when valid);
    `cleaned_values` carries the stripped hostname/username plus the raw
    password and language code.
    """
    values = {
        "hostname": form.get("hostname", "").strip(),
        "username": form.get("username", "").strip(),
        "password": form.get("password", ""),
        "language": form.get("language", ""),
    }
    confirmation = form.get("password2", "")
    errors = []
    if not HOSTNAME_RE.match(values["hostname"]):
        errors.append("Hostname must be lowercase letters, digits, hyphens (max 63 chars).")
    if not USERNAME_RE.match(values["username"]):
        errors.append("Username must start with a letter or underscore, lowercase only.")
    if len(values["password"]) < 8:
        errors.append("Password must be at least 8 characters.")
    if values["password"] != confirmation:
        errors.append("Passwords do not match.")
    if values["language"] not in LANGUAGES:
        errors.append("Pick a language.")
    return errors, values
def build_disk_config(boot_drive):
    """Build archinstall's disk-layout JSON for a whole-disk ext4 install.

    archinstall 4.x dropped the `use_entire_disk` shortcut — `default_layout`
    now requires fully-specified partitions. Calling
    suggest_single_disk_layout with ext4 + no separate /home short-circuits
    its interactive prompts.

    Raises RuntimeError when archinstall can't resolve `boot_drive`.
    """
    import asyncio
    from archinstall.lib.disk.device_handler import device_handler
    from archinstall.lib.disk.disk_menu import suggest_single_disk_layout
    from archinstall.lib.models.device import (
        DiskLayoutConfiguration,
        DiskLayoutType,
        FilesystemType,
    )
    device_handler.load_devices()
    target = device_handler.get_device(Path(boot_drive))
    if target is None:
        raise RuntimeError(f"archinstall could not resolve device {boot_drive!r}")
    # archinstall renamed the enum members to ALL_CAPS at some point between
    # when this was written and the pinned Arch live ISO version — the old
    # name `Ext4` now raises AttributeError at install time.
    modification = asyncio.run(
        suggest_single_disk_layout(
            target,
            filesystem_type=FilesystemType.EXT4,
            separate_home=False,
        )
    )
    layout = DiskLayoutConfiguration(
        config_type=DiskLayoutType.Default,
        device_modifications=[modification],
    )
    return layout.json()
# ---------------------------------------------------------------------------
# Post-install bootstrap payload
#
# Written into the target system via archinstall's `custom_commands` so that
# after reboot the user lands in "Furtka": Caddy serves a branded landing
# page + live status tiles on :80, avahi advertises proksi.local, and the
# console shows a welcome banner pointing at the URL.
#
# Asset files (HTML, CSS, shell scripts, systemd units, Caddyfile) live in
# assets/ in the repo — at ISO build time they end up on the live ISO
# as part of the webinstaller's source tree AND inside the resource-manager
# payload tarball. The installer reads them from the live-ISO copy, base64-
# encodes them, and hands them to archinstall so the chroot recreates each
# file bit-for-bit. Updates (Phase 2) refresh the tarball, which carries the
# same assets to the target's /opt/furtka/ tree.
# ---------------------------------------------------------------------------
# Tarball built by iso/build.sh containing the furtka/ Python package + the
# bundled apps/ tree (plus assets/). The webinstaller reads it from the
# live ISO at request-time and base64-encodes it into a custom_command for
# archinstall.
# Missing on dev boxes without an ISO build — _resource_manager_commands()
# degrades to returning [] in that case.
RESOURCE_MANAGER_PAYLOAD = Path("/opt/furtka-resource-manager.tar.gz")
# Asset root. Two layouts we have to handle:
# dev / tests — webinstaller/app.py sits at repo_root/webinstaller/ and
# assets live at repo_root/assets/.
# live ISO — iso/build.sh copies webinstaller/ to /opt/furtka/ AND
# copies assets/ to /opt/assets/ right next to
# app.py, so the same "assets next to me" lookup works.
# Probe the sibling path first (ISO case), fall back to the repo layout.
def _resolve_assets_dir() -> Path:
here = Path(__file__).resolve().parent
sibling = here / "assets"
if sibling.is_dir():
return sibling
repo_copy = here.parent / "assets"
if repo_copy.is_dir():
return repo_copy
raise FileNotFoundError(
f"furtka assets not found near {here} — looked in {sibling} and {repo_copy}"
)
_ASSETS_DIR = _resolve_assets_dir()
def _read_asset(relpath: str) -> str:
    """Return the UTF-8 contents of an on-disk asset shipped under assets/.

    Raises FileNotFoundError if the asset is missing, which is loud by design:
    an install that tries to write an asset that isn't there is broken before
    the user ever boots the target, not after.
    """
    return (_ASSETS_DIR / relpath).read_text(encoding="utf-8")
# Shell shim installed at /usr/local/bin/furtka (mode 755) by
# _resource_manager_commands(). Payload is a runtime string — do not edit
# cosmetically; it lands on the target bit-for-bit.
_FURTKA_WRAPPER_SH = """\
#!/bin/sh
# Tiny launcher for the furtka resource-manager CLI. The Python source lives
# under /opt/furtka/current/furtka/ — /current is a symlink that gets
# flipped by self-updates (Phase 2), so this shim stays stable across
# upgrades while the underlying code tree is swapped atomically.
PYTHONPATH=/opt/furtka/current exec python3 -m furtka.cli "$@"
"""
def _write_file_cmd(path, content, mode=None):
"""Shell command that recreates `path` with `content` inside the chroot.
Uses base64 so we don't have to worry about bash / JSON / archinstall
quoting the payload through three layers of shell. `base64` is part of
coreutils and always available in the target system.
"""
b64 = base64.b64encode(content.encode()).decode()
parent = path.rsplit("/", 1)[0]
cmd = f"mkdir -p {parent} && printf %s {b64} | base64 -d > {path}"
if mode is not None:
cmd += f" && chmod {mode} {path}"
return cmd
# Unit files shipped under assets/systemd/ — systemctl-linked and enabled
# at install time by _resource_manager_commands().
_FURTKA_UNITS = (
    "furtka-api.service",
    "furtka-reconcile.service",
    "furtka-status.service",
    "furtka-status.timer",
    "furtka-welcome.service",
    # Daily apps-catalog pull. Timer drives the service; the .service itself
    # is oneshot and also callable ad-hoc via `furtka catalog sync`.
    "furtka-catalog-sync.service",
    "furtka-catalog-sync.timer",
)
def _resource_manager_commands():
    """Commands to land /opt/furtka/versions/<ver>/ + symlink /opt/furtka/current
    + the `furtka` CLI shim + systemctl-link the unit files.

    Reads the payload tarball staged into the live ISO at build time. If the
    file isn't present (dev box without an ISO build), returns [] so the rest
    of the install still works — the resource manager just won't be installed,
    and nothing else on the system references furtka-* units.

    Returns:
        list[str]: shell command strings for archinstall's custom_commands,
        or [] when the payload tarball is absent.
    """
    if not RESOURCE_MANAGER_PAYLOAD.exists():
        print(
            f"warning: {RESOURCE_MANAGER_PAYLOAD} missing, "
            "resource manager will NOT be installed on target",
            file=sys.stderr,
        )
        return []
    payload_b64 = base64.b64encode(RESOURCE_MANAGER_PAYLOAD.read_bytes()).decode()
    # Extract to a staging directory first, then rename to versions/<ver>/.
    # That way the version-ID lookup is data-driven (reads VERSION from the
    # tarball) instead of hardcoded at install-time — keeps the installer
    # version-agnostic so a newer ISO doesn't need a webinstaller change to
    # ship a new Furtka version.
    extract_and_link = (
        "mkdir -p /opt/furtka/versions && "
        "staging=$(mktemp -d /opt/furtka/versions/staging-XXXXXX) && "
        f'printf %s {payload_b64} | base64 -d | tar -xzf - -C "$staging" && '
        'ver=$(cat "$staging/VERSION") && '
        # Guard against an empty VERSION file: without this, `mv "$staging"
        # "/opt/furtka/versions/"` would move the staging dir into versions/
        # as a subdir and the symlink target would be invalid.
        '[ -n "$ver" ] || { echo "empty VERSION in payload" >&2; exit 1; } && '
        'mv "$staging" "/opt/furtka/versions/$ver" && '
        # mktemp -d creates the staging dir with mode 700; that survives the
        # mv and leaves Caddy (which runs as the `caddy` user, not root)
        # unable to traverse /opt/furtka/current/ when it tries to serve
        # the landing page. Open up to 755 so file_server can read.
        'chmod 755 "/opt/furtka/versions/$ver" && '
        'ln -sfn "/opt/furtka/versions/$ver" /opt/furtka/current'
    )
    systemctl_link = "systemctl link " + " ".join(
        f"/opt/furtka/current/assets/systemd/{u}" for u in _FURTKA_UNITS
    )
    systemctl_enable = "systemctl enable " + " ".join(_FURTKA_UNITS)
    return [
        extract_and_link,
        _write_file_cmd("/usr/local/bin/furtka", _FURTKA_WRAPPER_SH, mode="755"),
        systemctl_link,
        systemctl_enable,
    ]
def _furtka_json_cmd(hostname):
"""Write /var/lib/furtka/furtka.json with install-time facts.
Replaces the __HOSTNAME__ sed pass the landing page reads this file
at runtime and renders the hostname chip from it. install_date + version
ride along so the settings page can display them without hitting the
status timer's refresh cycle.
Heredoc rather than base64 + sed the previous version had two layers
of quoting that archinstall's custom_commands shell-eval path parsed
inconsistently, leaving this command as a silent no-op on some installs.
The heredoc evaluates `$(date ...)` and `$(cat VERSION)` at chroot
runtime and sidesteps the quoting hazard entirely. Hostname has already
been validated by validate_step1.
"""
return (
"mkdir -p /var/lib/furtka && "
"cat > /var/lib/furtka/furtka.json <<EOF\n"
"{\n"
f' "hostname": "{hostname}",\n'
' "install_date": "$(date -Iseconds)",\n'
' "version": "$(cat /opt/furtka/current/VERSION 2>/dev/null || echo dev)"\n'
"}\n"
"EOF"
)
def _users_json_cmd(username, password):
    """Write /var/lib/furtka/users.json with the admin account hashed.

    The installed core's furtka-api reads this file on every login attempt;
    its auth.py module treats `admin.username` + `admin.hash` as the only
    credential. Hashing happens here in the webinstaller (werkzeug is a
    Flask transitive dep, so it is already importable in this environment)
    so the chroot never needs pip. Mode 0600 so only root on the installed
    box can read the PBKDF2 hash.
    """
    # Local imports keep werkzeug contained to this function; UTC is
    # imported alongside datetime so the function does not silently depend
    # on a module-level `from datetime import UTC` being present.
    from datetime import UTC, datetime

    from werkzeug.security import generate_password_hash

    users = {
        "admin": {
            "username": username,
            "hash": generate_password_hash(password),
            # Recorded so the settings UI can show the account's age
            # without extra bookkeeping on the installed system.
            "created_at": datetime.now(UTC).isoformat(timespec="seconds"),
        }
    }
    return _write_file_cmd(
        "/var/lib/furtka/users.json",
        json.dumps(users, indent=2) + "\n",
        mode="600",
    )
def _post_install_commands(hostname, admin_username, admin_password):
    """Assemble the ordered chroot command list for the base-OS post-install.

    Ordering matters: Caddy's import dirs and the Caddyfile land before any
    service can start, the resource-manager payload is extracted before
    furtka.json (which reads /opt/furtka/current/VERSION), and the admin
    users.json comes last.
    """
    commands = [
        # Import dir for the HTTP→HTTPS force-redirect snippet. The
        # /api/furtka/https/force endpoint writes/removes a .caddyfile here
        # to toggle the redirect. Must exist before Caddy starts — the
        # Caddyfile's glob `import /etc/caddy/furtka.d/*.caddyfile`
        # tolerates an empty dir but not a missing one on every Caddy
        # version, so create it up front.
        "install -d -m 0755 -o root -g root /etc/caddy/furtka.d",
        # Parallel dir for the top-level HTTPS-listener snippet, written by
        # /api/furtka/https/force (26.15-alpha+) when the user opts into
        # HTTPS. Empty by default so fresh installs never generate a
        # tls internal cert — that was the 26.14 regression where Firefox
        # hit unbypassable SEC_ERROR_BAD_SIGNATURE because Caddy's fixed
        # intermediate-CN clashed with cached trust from a previously
        # reinstalled Furtka box.
        "install -d -m 0755 -o root -g root /etc/caddy/furtka-https.d",
        # /etc/caddy/Caddyfile per Caddy's convention (systemd unit points
        # there). Content comes from the shipped asset; __FURTKA_HOSTNAME__
        # is its placeholder for the real hostname — `tls internal` needs a
        # named site block for a leaf cert, and the hostname isn't known
        # until the user fills in the form. Self-updates re-apply the same
        # substitution against /etc/hostname (updater._refresh_caddyfile).
        _write_file_cmd(
            "/etc/caddy/Caddyfile",
            _read_asset("Caddyfile").replace("__FURTKA_HOSTNAME__", hostname),
        ),
        # Initial status.json so Caddy doesn't 404 before furtka-status fires.
        _write_file_cmd("/var/lib/furtka/status.json", _read_asset("www/status.json")),
        # nss-mdns: splice `mdns_minimal [NOTFOUND=return]` before `resolve`
        # on the hosts line so `*.local` resolves on the installed system.
        # The grep guard makes a re-run (or a future Arch default that
        # already ships mdns) a no-op instead of double-injecting.
        (
            "grep -q 'mdns_minimal' /etc/nsswitch.conf || "
            "sed -i '/^hosts:/ s/resolve/mdns_minimal [NOTFOUND=return] resolve/' "
            "/etc/nsswitch.conf"
        ),
    ]
    # Resource manager bootstrap: extract tarball → versions/<ver>/,
    # symlink current, install wrapper, systemctl-link unit files.
    commands.extend(_resource_manager_commands())
    # furtka.json reads /opt/furtka/current/VERSION, so it must follow the
    # resource-manager extraction above.
    commands.append(_furtka_json_cmd(hostname))
    # Admin account for the Furtka web UI, materialised at mode 0600 on the
    # target partition — the installed core's auth.py picks it up on first
    # login.
    commands.append(_users_json_cmd(admin_username, admin_password))
    return commands
def _detect_bootloader():
# systemd-boot is UEFI-only; on BIOS/legacy it trips HardwareIncompatibilityError
# inside archinstall. /sys/firmware/efi exists iff we were booted via UEFI.
return "Systemd-boot" if Path("/sys/firmware/efi").exists() else "Grub"
def build_archinstall_config(s):
    """Translate wizard settings `s` into archinstall's user_configuration dict.

    `s` carries boot_drive, hostname, username, password, and language —
    all validated by the wizard steps before this runs. The result is
    serialized to user_configuration.json and fed to `archinstall --config`.
    """
    return {
        "archinstall-language": "English",
        # NOTE(review): timezone is hardcoded — presumably acceptable for
        # the current audience; revisit if the wizard grows a timezone step.
        "timezone": "Europe/Berlin",
        "ntp": True,
        "bootloader": _detect_bootloader(),
        "disk_config": build_disk_config(s["boot_drive"]),
        "hostname": s["hostname"],
        "kernels": ["linux"],
        "packages": [
            "docker",
            "docker-compose",
            # Editors for console/SSH recovery — `nano` is the beginner-friendly
            # one, `vim` stays because it's muscle-memory for the dev team.
            "nano",
            "vim",
            "git",
            "htop",
            "curl",
            # Remote access — archinstall 4.x's `ssh: True` flag is flaky about
            # actually pulling in openssh, so list it explicitly and enable sshd
            # via `services` below. Without this, the documented recovery path
            # (SSH in → edit .env) doesn't work.
            "openssh",
            # Base OS post-install (landing page + mDNS on installed system).
            "caddy",
            "avahi",
            "nss-mdns",
            # Resource manager runtime — pure-stdlib Python, no pip needed
            # because we expose the package via PYTHONPATH in /usr/local/bin/furtka.
            "python",
        ],
        "profile": {"type": "server"},
        "services": [
            "docker",
            # Base OS post-install services. Only packaged units go here —
            # archinstall runs `systemctl enable` on this list *before*
            # custom_commands, so our own furtka-welcome + furtka-status.timer
            # units (written in custom_commands) are enabled there instead.
            "caddy",
            "avahi-daemon",
            "sshd",
        ],
        # `gpasswd -a <user> docker` has to stay first — adds the user to
        # the docker group once the group exists (archinstall creates users
        # before pacstrapping extras). After that we drop the Furtka landing
        # page, status timer, and welcome banner into place.
        "custom_commands": [
            f"gpasswd -a {s['username']} docker",
            *_post_install_commands(s["hostname"], s["username"], s["password"]),
        ],
        "network_config": {"type": "iso"},
        "ssh": True,
        "audio_config": None,
        "locale_config": {
            "locale": LANGUAGES[s["language"]]["locale"],
            # Keyboard layout follows the chosen language so a German user
            # doesn't get a US layout at the TTY console (where things like
            # `/`, `-`, `=` land on surprising keys and make even `sudo vim`
            # painful). `en` falls through to "us" which is what we want.
            "keyboard_layout": LANGUAGES[s["language"]]["keyboard"],
        },
    }
def build_archinstall_creds(s):
    """Build archinstall's user_credentials.json payload from settings `s`.

    archinstall 4.x expects the plaintext sentinel keys `!root-password`
    and `!password`. Users carrying neither `!password` nor `enc_password`
    are silently dropped by User.parse_arguments — which surfaces as
    login failures on the installed box.
    """
    admin_user = {
        "username": s["username"],
        "!password": s["password"],
        "sudo": True,
        "groups": [],
    }
    return {
        "!root-password": s["password"],
        "users": [admin_user],
    }
def write_install_files(s, state_dir):
    """Materialise archinstall's config and creds JSON under `state_dir`.

    Returns (config_path, creds_path). The credentials file carries the
    plaintext password sentinels archinstall expects, so it is created
    with mode 0600 from the very first byte — the previous
    write-then-chmod sequence left a window where the default-umask file
    was readable by other users on the live system.
    """
    state_dir.mkdir(parents=True, exist_ok=True)
    config_path = state_dir / "user_configuration.json"
    creds_path = state_dir / "user_credentials.json"
    config_path.write_text(json.dumps(build_archinstall_config(s), indent=2))
    # O_CREAT's mode only applies when the file is created; open(fd, ...)
    # takes ownership of the descriptor and closes it with the context.
    fd = os.open(creds_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
    with open(fd, "w") as fh:
        fh.write(json.dumps(build_archinstall_creds(s), indent=2))
    # A pre-existing creds file keeps its old mode despite O_CREAT's 0600,
    # so tighten unconditionally as before.
    creds_path.chmod(0o600)
    return config_path, creds_path
def spawn_archinstall(config_path, creds_path, log_path):
    """Launch `archinstall --silent` detached, logging to `log_path`.

    Returns the Popen handle. stdout and stderr are merged into the log
    file so the progress parser sees a single stream; start_new_session
    detaches the installer from this process's session so it is not torn
    down with the web server.
    """
    log_fh = open(log_path, "wb")
    try:
        return subprocess.Popen(
            [
                "archinstall",
                "--config",
                str(config_path),
                "--creds",
                str(creds_path),
                "--silent",
            ],
            stdout=log_fh,
            stderr=subprocess.STDOUT,
            start_new_session=True,
        )
    finally:
        # Popen dup'd the descriptor into the child; closing the parent's
        # copy here prevents leaking one open file per install run.
        log_fh.close()
@app.route("/") @app.route("/")
def home(): def home():
return redirect(url_for("install_step_1")) return "Hello World"
@app.route("/install/step1", methods=["GET", "POST"]) @app.route("/install/step1", methods=["GET", "POST"])
def install_step_1(): def install_step_1():
errors = []
if request.method == "POST": if request.method == "POST":
errors, values = validate_step1(request.form) settings["hostname"] = request.form["hostname"]
if not errors:
settings.update(values)
return redirect(url_for("install_step_2")) return redirect(url_for("install_step_2"))
form_values = values return render_template("install/step1.html")
else:
form_values = {k: settings[k] for k in ("hostname", "username", "language")}
return render_template(
"install/step1.html",
values=form_values,
languages=LANGUAGES,
errors=errors,
)
@app.route("/install/step2", methods=["GET", "POST"]) @app.route("/install/step2", methods=["GET", "POST"])
def install_step_2(): def install_step_2():
if request.method == "POST": if request.method == "POST":
boot_drive = request.form.get("boot_drive", "").strip() settings["boot_drive_uuid"] = request.form["boot_drive_uuid"]
if boot_drive:
settings["boot_drive"] = boot_drive
return redirect(url_for("install_overview")) return redirect(url_for("install_overview"))
return render_template( return render_template("install/step2.html", drives=list_scored_devices())
"install/step2.html",
drives=list_scored_devices(),
selected=settings.get("boot_drive", ""),
)
@app.route("/install/overview") @app.route("/install/overview")
def install_overview(): def install_overview():
masked = {**settings, "password": "" * 8 if settings["password"] else ""} return render_template("install/overview.html", settings=settings)
return render_template("install/overview.html", settings=masked)
@app.route("/install/run", methods=["POST"])
def install_run():
if not settings["boot_drive"] or not settings["username"] or not settings["password"]:
return redirect(url_for("install_step_1"))
config_path, creds_path = write_install_files(settings, STATE_DIR)
INSTALL_LOG.write_bytes(b"")
if os.environ.get("FURTKA_DRY_RUN") == "1":
INSTALL_LOG.write_text(
f"DRY RUN: would exec archinstall --config {config_path} "
f"--creds {creds_path} --silent\n"
)
else:
spawn_archinstall(config_path, creds_path, INSTALL_LOG)
return redirect(url_for("install_log_view"))
@app.route("/install/log")
def install_log_view():
log = INSTALL_LOG.read_text() if INSTALL_LOG.exists() else ""
return render_template(
"install/log.html",
log=log,
progress=parse_install_progress(log),
)
@app.route("/install/log.json")
def install_log_json():
log = INSTALL_LOG.read_text() if INSTALL_LOG.exists() else ""
return jsonify(log=log, progress=parse_install_progress(log))
@app.route("/install/reboot", methods=["POST"])
def install_reboot():
# Only allow rebooting once the install has actually finished — we don't
# want a panicked click during install to reboot mid-pacstrap.
log = INSTALL_LOG.read_text() if INSTALL_LOG.exists() else ""
if parse_install_progress(log)["status"] != "done":
return redirect(url_for("install_log_view"))
# Delay reboot a few seconds so the browser can finish fetching CSS / assets
# for the rebooting page before the Flask server (and network) go away.
# Without this, the reboot page renders unstyled (giant inline SVG icon).
subprocess.Popen(
["/bin/sh", "-c", "sleep 3 && /usr/bin/systemctl reboot"],
start_new_session=True,
)
return render_template("install/rebooting.html", hostname=settings["hostname"])
if __name__ == "__main__": if __name__ == "__main__":

View file

@ -1,42 +1,7 @@
import subprocess import subprocess
def _boot_disk_name(): def get_drive_health(device):
"""Return the parent disk name of the live-ISO boot media (e.g. "sdb"), or None.
On a normal box `/run/archiso/bootmnt` does not exist and we return None,
leaving the device list untouched. On bare metal booted from USB this is
the stick we booted from we want to filter it out so the user can't
accidentally pick it as the install target.
"""
try:
result = subprocess.run(
["findmnt", "-no", "SOURCE", "/run/archiso/bootmnt"],
capture_output=True,
text=True,
)
except FileNotFoundError:
return None
if result.returncode != 0:
return None
partition = result.stdout.strip()
if not partition:
return None
try:
parent = subprocess.run(
["lsblk", "-no", "PKNAME", partition],
capture_output=True,
text=True,
)
except FileNotFoundError:
return None
if parent.returncode != 0:
return None
name = parent.stdout.strip().splitlines()[0] if parent.stdout.strip() else ""
return name or None
def _smart_status(device):
try: try:
result = subprocess.run( result = subprocess.run(
["smartctl", "-H", device], ["smartctl", "-H", device],
@ -44,25 +9,13 @@ def _smart_status(device):
) )
output = result.stdout.decode() output = result.stdout.decode()
if "PASSED" in output: if "PASSED" in output:
return "passed" return 10
elif "FAILED" in output: elif "FAILED" in output:
return "failed" return 0
return "unknown" return 5
except Exception as e: except Exception as e:
print(f"Error checking SMART status for {device}: {e}") print(f"Error checking SMART status for {device}: {e}")
return "unknown" return 5
_HEALTH_SCORE = {"passed": 10, "failed": 0, "unknown": 5}
_HEALTH_LABEL = {
"passed": "Healthy",
"failed": "SMART warning",
"unknown": "Status unknown",
}
def get_drive_health(device):
return _HEALTH_SCORE[_smart_status(device)]
def get_drive_type_score(device): def get_drive_type_score(device):
@ -74,15 +27,6 @@ def get_drive_type_score(device):
return 5 return 5
def get_drive_type_label(device):
    """Coarse media-type label ("NVMe"/"SSD"/"HDD") guessed from the device name.

    Matches substrings of the lowercased name; anything that is neither
    nvme nor ssd falls through to "HDD".
    """
    lowered = device.lower()
    for marker, label in (("nvme", "NVMe"), ("ssd", "SSD")):
        if marker in lowered:
            return label
    return "HDD"
def parse_size_gb(size_str): def parse_size_gb(size_str):
size_str = size_str.strip().upper().replace(",", ".") size_str = size_str.strip().upper().replace(",", ".")
if not size_str: if not size_str:
@ -110,57 +54,36 @@ def score_device(device, size_gb):
return get_drive_type_score(device) + get_drive_health(device) + get_size_score(size_gb) return get_drive_type_score(device) + get_drive_health(device) + get_size_score(size_gb)
def parse_lsblk_output(output, boot_disk=None):
    """Parse `lsblk -dn -o NAME,SIZE,TYPE` output into scored device dicts.

    Only TYPE=disk rows survive, so the live ISO's own squashfs (loop) and
    the boot CD-ROM (rom) never show up as install targets. When
    `boot_disk` is given, that disk is dropped as well — on bare metal it
    is the USB stick the live ISO booted from, which appears as TYPE=disk
    and would otherwise look like a valid target. Result is sorted by
    score, highest first.
    """
    scored = []
    for row in output.strip().split("\n"):
        fields = row.split()
        if len(fields) < 3:
            # Covers blank rows too: "".split() is empty.
            continue
        name, size, dev_type = fields[0], fields[1], fields[2]
        if dev_type != "disk":
            continue
        if boot_disk and name == boot_disk:
            continue
        device = f"/dev/{name}"
        verdict = _smart_status(device)
        total = (
            get_drive_type_score(device)
            + _HEALTH_SCORE[verdict]
            + get_size_score(parse_size_gb(size))
        )
        scored.append(
            {
                "name": device,
                "size": size,
                "type_label": get_drive_type_label(device),
                "health_label": _HEALTH_LABEL[verdict],
                "score": total,
            }
        )
    scored.sort(key=lambda entry: entry["score"], reverse=True)
    return scored
def list_scored_devices(): def list_scored_devices():
"""Return [{name, size, score}, ...] for all physical disks, highest score first.""" """Return [{name, size, score}, ...] for all physical disks, highest score first."""
devices = []
try: try:
result = subprocess.run( result = subprocess.run(
["lsblk", "-dn", "-o", "NAME,SIZE,TYPE"], ["lsblk", "-dn", "-o", "NAME,SIZE"],
capture_output=True, capture_output=True,
text=True, text=True,
check=True, check=True,
) )
for line in result.stdout.strip().split("\n"):
if not line:
continue
parts = line.split()
if len(parts) < 2:
continue
name, size = parts[0], parts[1]
device = f"/dev/{name}"
devices.append(
{
"name": device,
"size": size,
"score": score_device(device, parse_size_gb(size)),
}
)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
print(f"Error listing devices: {e}") print(f"Error listing devices: {e}")
return []
return parse_lsblk_output(result.stdout, boot_disk=_boot_disk_name()) devices.sort(key=lambda d: d["score"], reverse=True)
return devices
def main(): def main():

View file

@ -1,423 +0,0 @@
:root {
--bg: #f7f6f3;
--bg-subtle: #efeee8;
--bg-card: #ffffff;
--fg: #0e0e0f;
--fg-muted: #6b6b6f;
--accent: #c03a28;
--accent-hover: #a0301f;
--border: #e4e3dc;
--danger: #b00020;
--success: #2e7d32;
--font-sans:
-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue",
Arial, "Noto Sans", sans-serif;
--font-mono:
ui-monospace, SFMono-Regular, Menlo, Consolas, "Liberation Mono", monospace;
}
@media (prefers-color-scheme: dark) {
:root {
--bg: #0d0d0f;
--bg-subtle: #17171a;
--bg-card: #1c1c20;
--fg: #ececee;
--fg-muted: #8a8a90;
--accent: #ff6b56;
--accent-hover: #ff8b78;
--border: #2a2a2e;
--danger: #ff6b6b;
--success: #6bcf6b;
}
}
* { box-sizing: border-box; }
html { -webkit-text-size-adjust: 100%; }
body {
margin: 0;
background: var(--bg);
color: var(--fg);
font-family: var(--font-sans);
font-size: 1.0625rem;
line-height: 1.6;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
display: flex;
flex-direction: column;
min-height: 100vh;
}
.container {
max-width: 44rem;
margin-inline: auto;
padding-inline: 1.5rem;
width: 100%;
}
main.container {
flex: 1;
padding-block: 2.5rem 4rem;
}
a {
color: var(--accent);
text-decoration: underline;
text-decoration-color: color-mix(in srgb, var(--accent) 35%, transparent);
text-underline-offset: 3px;
}
a:hover { color: var(--accent-hover); }
/* ── Header ─────────────────────────────────────────────────── */
.site-header {
border-bottom: 1px solid var(--border);
padding-block: 1rem;
}
.site-header .container {
display: flex;
align-items: center;
gap: 1rem;
}
.site-title {
display: inline-flex;
align-items: center;
gap: 0.5rem;
text-decoration: none;
color: inherit;
margin-right: auto;
}
.gate-mark {
color: var(--accent);
width: 1.15em;
height: 1.15em;
vertical-align: -0.15em;
}
.wordmark {
font-weight: 600;
letter-spacing: 0.14em;
color: var(--fg);
text-transform: uppercase;
font-size: 0.78rem;
}
.wordmark .sep { color: var(--fg-muted); margin: 0 0.4em; }
.step-indicator {
font-family: var(--font-sans);
font-weight: 500;
font-size: 0.72rem;
letter-spacing: 0.14em;
text-transform: uppercase;
color: var(--fg-muted);
}
/* ── Page heading ───────────────────────────────────────────── */
h1 {
font-family: var(--font-sans);
font-weight: 800;
font-size: clamp(2rem, 5vw, 2.75rem);
line-height: 1.05;
letter-spacing: -0.025em;
margin: 0 0 0.5rem;
}
.lede {
color: var(--fg-muted);
margin: 0 0 2rem;
font-size: 1.05rem;
}
.muted {
color: var(--fg-muted);
font-size: 0.9rem;
}
/* ── Forms ──────────────────────────────────────────────────── */
form { margin: 0; }
.field {
display: block;
margin-bottom: 1.25rem;
}
.field > label {
display: block;
font-weight: 600;
font-size: 0.92rem;
margin-bottom: 0.35rem;
color: var(--fg);
}
.field .hint {
display: block;
font-size: 0.82rem;
color: var(--fg-muted);
margin-top: 0.3rem;
}
input[type="text"],
input[type="password"],
select {
width: 100%;
padding: 0.65rem 0.85rem;
font-family: var(--font-sans);
font-size: 1rem;
background: var(--bg-card);
color: var(--fg);
border: 1px solid var(--border);
border-radius: 6px;
transition: border-color 120ms, box-shadow 120ms;
}
input[type="text"]:focus,
input[type="password"]:focus,
select:focus {
outline: none;
border-color: var(--accent);
box-shadow: 0 0 0 3px color-mix(in srgb, var(--accent) 20%, transparent);
}
/* ── Drive radio cards ──────────────────────────────────────── */
.drive-list {
display: flex;
flex-direction: column;
gap: 0.5rem;
margin-bottom: 1.5rem;
}
.drive {
display: flex;
align-items: center;
gap: 0.85rem;
padding: 0.85rem 1rem;
background: var(--bg-card);
border: 1px solid var(--border);
border-radius: 8px;
cursor: pointer;
transition: border-color 120ms, background 120ms;
}
.drive:hover { border-color: color-mix(in srgb, var(--accent) 50%, var(--border)); }
.drive input[type="radio"] {
accent-color: var(--accent);
width: 1.1rem;
height: 1.1rem;
margin: 0;
}
.drive:has(input:checked) {
border-color: var(--accent);
background: color-mix(in srgb, var(--accent) 6%, var(--bg-card));
}
.drive .name {
font-family: var(--font-mono);
font-weight: 600;
font-size: 0.95rem;
}
.drive .meta {
margin-left: auto;
display: flex;
gap: 0.5rem;
align-items: center;
font-size: 0.85rem;
color: var(--fg-muted);
}
.drive .chip {
background: var(--bg-subtle);
padding: 0.15rem 0.5rem;
border-radius: 999px;
font-size: 0.78rem;
}
.drive .badge-recommended {
background: color-mix(in srgb, var(--accent) 18%, var(--bg-card));
color: var(--accent);
padding: 0.15rem 0.55rem;
border-radius: 999px;
font-size: 0.75rem;
font-weight: 600;
letter-spacing: 0.02em;
text-transform: uppercase;
}
/* ── Summary table (overview) ───────────────────────────────── */
.summary {
background: var(--bg-card);
border: 1px solid var(--border);
border-radius: 8px;
overflow: hidden;
margin-bottom: 1.5rem;
}
.summary table {
width: 100%;
border-collapse: collapse;
}
.summary td {
padding: 0.65rem 1rem;
border-bottom: 1px solid var(--border);
font-size: 0.95rem;
}
.summary tr:last-child td { border-bottom: none; }
.summary td:first-child {
color: var(--fg-muted);
width: 12rem;
text-transform: capitalize;
}
.summary td:last-child {
font-family: var(--font-mono);
font-size: 0.9rem;
}
/* ── Buttons ────────────────────────────────────────────────── */
.actions {
display: flex;
align-items: center;
gap: 1rem;
margin-top: 1.5rem;
flex-wrap: wrap;
}
.btn {
display: inline-block;
padding: 0.7rem 1.25rem;
font-family: var(--font-sans);
font-size: 0.95rem;
font-weight: 600;
letter-spacing: 0.01em;
border: 1px solid transparent;
border-radius: 6px;
cursor: pointer;
text-decoration: none;
transition: background 120ms, border-color 120ms, color 120ms;
}
.btn-primary {
background: var(--accent);
color: #fff;
}
.btn-primary:hover { background: var(--accent-hover); color: #fff; }
.btn-primary:disabled {
background: var(--bg-subtle);
color: var(--fg-muted);
cursor: not-allowed;
}
.btn-danger {
background: var(--danger);
color: #fff;
}
.btn-danger:hover { filter: brightness(1.1); color: #fff; }
.btn-link {
background: transparent;
color: var(--fg-muted);
padding: 0.7rem 0;
border: none;
text-decoration: none;
}
.btn-link:hover { color: var(--accent); }
/* ── Alerts ─────────────────────────────────────────────────── */
.alert {
padding: 0.85rem 1rem;
border-radius: 6px;
margin-bottom: 1.5rem;
border: 1px solid;
font-size: 0.92rem;
}
.alert-error {
background: color-mix(in srgb, var(--danger) 8%, var(--bg-card));
border-color: color-mix(in srgb, var(--danger) 35%, transparent);
color: var(--danger);
}
.alert-warn {
background: color-mix(in srgb, var(--accent) 8%, var(--bg-card));
border-color: color-mix(in srgb, var(--accent) 35%, transparent);
color: var(--accent);
}
.alert ul { margin: 0.4rem 0 0; padding-left: 1.2rem; }
.alert li { margin-bottom: 0.2rem; }
/* ── Log pane ───────────────────────────────────────────────── */
.log {
background: #0d0d0f;
color: #e0e0e3;
border: 1px solid var(--border);
border-radius: 8px;
padding: 1rem 1.2rem;
font-family: var(--font-mono);
font-size: 0.82rem;
line-height: 1.5;
max-height: 65vh;
overflow: auto;
white-space: pre-wrap;
word-break: break-word;
margin: 0;
}
.progress {
background: var(--bg-subtle);
border: 1px solid var(--border);
border-radius: 999px;
height: 12px;
overflow: hidden;
margin: 1.5rem 0 0.5rem;
}
.progress-bar {
height: 100%;
background: linear-gradient(
90deg,
var(--accent),
color-mix(in srgb, var(--accent) 55%, #fff)
);
transition: width 0.6s ease;
border-radius: 999px;
}
.progress-bar-error { background: var(--danger); }
.progress-bar-done { background: var(--success); }
.progress-phase {
margin: 0.3rem 0 1.5rem;
color: var(--fg-muted);
font-size: 0.95rem;
}
.log-details {
margin-top: 1.5rem;
}
.log-details summary {
cursor: pointer;
font-size: 0.9rem;
color: var(--fg-muted);
user-select: none;
padding: 0.25rem 0;
}
.log-details summary:hover { color: var(--fg); }
.log-details[open] summary { margin-bottom: 0.6rem; }
/* ── Footer ─────────────────────────────────────────────────── */
.site-footer {
margin-top: auto;
border-top: 1px solid var(--border);
padding-block: 1rem;
}
.site-footer .container {
display: flex;
align-items: center;
justify-content: space-between;
gap: 0.5rem;
flex-wrap: wrap;
}
.site-footer .kicker {
font-family: var(--font-sans);
font-weight: 500;
font-size: 0.72rem;
letter-spacing: 0.14em;
text-transform: uppercase;
color: var(--fg-muted);
margin: 0;
}
/* ── Selection + focus ──────────────────────────────────────── */
::selection { background: var(--accent); color: var(--bg); }
:focus-visible {
outline: 2px solid var(--accent);
outline-offset: 3px;
border-radius: 2px;
}

View file

@ -1,38 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>{% block title %}Furtka Installer{% endblock %}</title>
<link rel="stylesheet" href="{{ url_for('static', filename='style.css') }}">
{% block head_extra %}{% endblock %}
</head>
<body>
<header class="site-header">
<div class="container">
<a href="{{ url_for('install_step_1') }}" class="site-title" aria-label="Furtka Installer — restart">
<svg class="gate-mark" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor"
stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round" aria-hidden="true">
<path d="M4 20 V12 A9 9 0 0 1 20 12 V20"/>
<line x1="12" y1="5" x2="12" y2="20"/>
<line x1="3" y1="20" x2="21" y2="20"/>
<line x1="15" y1="12" x2="15" y2="14.5"/>
</svg>
<span class="wordmark">Furtka<span class="sep">·</span>Installer</span>
</a>
{% block step_indicator %}{% endblock %}
</div>
</header>
<main class="container">
{% block content %}{% endblock %}
</main>
<footer class="site-footer">
<div class="container">
<p class="kicker">Furtka {{ furtka_version }} · AGPL-3.0</p>
<p class="kicker"><a href="https://furtka.org" style="color: inherit; text-decoration: none">furtka.org</a></p>
</div>
</footer>
</body>
</html>

View file

@ -1,97 +0,0 @@
{% extends "base.html" %}
{% block title %}Installing… · Furtka Installer{% endblock %}
{% block head_extra %}
{# Fallback for users with JS disabled — otherwise the JS below takes over
and updates in-place so the log <details> doesn't re-collapse. #}
<noscript>
{% if progress.status == "running" %}<meta http-equiv="refresh" content="3">{% endif %}
</noscript>
{% endblock %}
{% block step_indicator %}<span class="step-indicator">Installing</span>{% endblock %}
{% block content %}
<h1 id="install-heading">
{% if progress.status == "done" %}Furtka is ready
{% elif progress.status == "error" %}Installation hit a snag
{% else %}Installing Furtka{% endif %}
</h1>
<p class="lede" id="install-lede">
{% if progress.status == "done" %}Installation finished. <strong>Remove the installer USB / eject the ISO</strong>, then click Reboot.
{% elif progress.status == "error" %}Something went wrong. Open the details below and share them so we can help.
{% else %}This takes a few minutes. Don't close this page or power off the machine.{% endif %}
</p>
{% if progress.status == "done" %}
<form method="post" action="{{ url_for('install_reboot') }}"
onsubmit="return confirm('Have you removed the installer USB / ejected the ISO? Click OK to reboot.');">
<div class="actions">
<button type="submit" class="btn btn-primary">Reboot now</button>
</div>
</form>
{% endif %}
<div class="progress" role="progressbar" aria-valuemin="0" aria-valuemax="100" aria-valuenow="{{ progress.percent }}">
<div id="progress-bar" class="progress-bar{% if progress.status == 'error' %} progress-bar-error{% endif %}{% if progress.status == 'done' %} progress-bar-done{% endif %}" style="width: {{ progress.percent }}%;"></div>
</div>
<p class="progress-phase"><span id="progress-phase">{{ progress.phase }}</span> · <span id="progress-percent">{{ progress.percent }}</span>%</p>
<details class="log-details">
<summary>Show details</summary>
<pre id="install-log" class="log">{{ log or "(waiting for install to start)" }}</pre>
</details>
<script>
(function () {
var initialStatus = {{ progress.status | tojson }};
if (initialStatus !== 'running') return;
var bar = document.getElementById('progress-bar');
var phaseEl = document.getElementById('progress-phase');
var percentEl = document.getElementById('progress-percent');
var logEl = document.getElementById('install-log');
var headingEl = document.getElementById('install-heading');
var ledeEl = document.getElementById('install-lede');
var HEADINGS = {
done: 'Furtka is ready',
error: 'Installation hit a snag',
};
var LEDES = {
done: 'Installation finished. Remove the USB / eject the installer image, then reboot.',
error: 'Something went wrong. Open the details below and share them so we can help.',
};
function tick() {
fetch('{{ url_for("install_log_json") }}', { cache: 'no-store' })
.then(function (r) { return r.json(); })
.then(function (data) {
var p = data.progress;
bar.style.width = p.percent + '%';
phaseEl.textContent = p.phase;
percentEl.textContent = p.percent;
if (logEl.textContent !== data.log) {
var atBottom = logEl.scrollTop + logEl.clientHeight >= logEl.scrollHeight - 8;
logEl.textContent = data.log || '(waiting for install to start)';
if (atBottom) logEl.scrollTop = logEl.scrollHeight;
}
if (p.status === 'done') {
// Reload so the server-rendered Done state (with the
// Reboot button) replaces the running-state markup.
window.location.reload();
return;
}
if (p.status === 'error') {
bar.classList.add('progress-bar-error');
headingEl.textContent = HEADINGS.error;
ledeEl.textContent = LEDES.error;
return;
}
setTimeout(tick, 3000);
})
.catch(function () { setTimeout(tick, 3000); });
}
setTimeout(tick, 3000);
})();
</script>
{% endblock %}

View file

@ -1,31 +1,13 @@
{% extends "base.html" %} <!DOCTYPE html>
<html>
{% block title %}Confirm · Furtka Installer{% endblock %} <head>
{% block step_indicator %}<span class="step-indicator">Step 3 of 3</span>{% endblock %} <title>Furtka Install</title>
</head>
{% block content %} <body>
<h1>Confirm install</h1> <h1>Overview</h1>
<p class="lede">Review the settings below. Continuing will <strong>wipe <code>{{ settings.boot_drive }}</code></strong> and install Furtka.</p> <h2>Results:</h2>
{% for k, s in settings.items() %}
<div class="alert alert-warn"> <p>{{k}}: {{s}}</p>
This is destructive. All existing data on the selected boot drive will be lost.
</div>
<div class="summary">
<table>
{% for k, v in settings.items() %}
<tr>
<td>{{ k.replace('_', ' ') }}</td>
<td>{{ v }}</td>
</tr>
{% endfor %} {% endfor %}
</table> </body>
</div> </html>
<form method="post" action="{{ url_for('install_run') }}">
<div class="actions">
<button type="submit" class="btn btn-danger">Install — wipe drive and proceed</button>
<a href="{{ url_for('install_step_1') }}" class="btn btn-link">← Start over</a>
</div>
</form>
{% endblock %}

View file

@ -1,14 +0,0 @@
{% extends "base.html" %}
{% block title %}Rebooting · Furtka Installer{% endblock %}
{% block step_indicator %}<span class="step-indicator">Done</span>{% endblock %}
{% block content %}
<h1>Rebooting…</h1>
<p class="lede">The machine is restarting. This page will stop responding in a moment — that's expected.</p>
<p><strong>Remove the USB stick now</strong> — if it's still plugged in when the machine reboots, some BIOS setups will boot into this installer again instead of starting Furtka.</p>
<p class="muted">If the installer does come back anyway, your BIOS is set to boot from USB before the disk. Press the one-time boot menu key at startup (often <kbd>F11</kbd>, <kbd>F12</kbd>, or <kbd>Esc</kbd> — it flashes briefly on screen) and pick the internal disk, or change the boot order in BIOS settings.</p>
<p>When the machine comes back up (~1 minute), open Furtka in your browser:</p>
<p><a href="http://{{ hostname }}.local" class="btn btn-primary">http://{{ hostname }}.local</a></p>
<p class="muted">If that doesn't resolve, your network may not support mDNS — use the IP address shown on the machine's console instead.</p>
{% endblock %}

Some files were not shown because too many files have changed in this diff Show more