# furtka/tests/test_install_runner.py
#
# Added in commit "feat(install): async background install with progress
# polling" (2026-04-21): POST /api/apps/install returns 202 after synchronous
# pre-validation; the docker-facing phases (compose pull -> ensure volumes ->
# compose up) run as a background systemd-run unit writing stage transitions
# to /var/lib/furtka/install-state.json, polled via
# GET /api/apps/install/status.
"""Tests for the background app-install runner.
Same shape as test_catalog.py / test_updater.py: fixture reloads the
module with env-overridden paths, dockerops calls are stubbed so nothing
touches a real daemon. Asserts that state transitions happen in the
right order and that exceptions flip the state to "error" with the
message before re-raising.
"""
from __future__ import annotations
import json
from pathlib import Path
import pytest
@pytest.fixture
def runner(tmp_path, monkeypatch):
    """Reload ``furtka.install_runner`` with all state paths inside *tmp_path*.

    Mirrors the fixture shape in test_catalog.py / test_updater.py: the env
    vars are set first, then ``furtka.paths`` is reloaded so the runner module
    picks up the overridden locations when it is reloaded in turn.

    Returns the freshly reloaded ``install_runner`` module.
    """
    apps = tmp_path / "apps"
    apps.mkdir()
    monkeypatch.setenv("FURTKA_APPS_DIR", str(apps))
    monkeypatch.setenv("FURTKA_INSTALL_STATE", str(tmp_path / "install-state.json"))
    monkeypatch.setenv("FURTKA_INSTALL_PLAN", str(tmp_path / "install-plan.json"))
    monkeypatch.setenv("FURTKA_INSTALL_LOCK", str(tmp_path / "install.lock"))
    import importlib

    from furtka import install_runner as r
    from furtka import paths as p

    # paths must be reloaded before the runner so the runner re-binds the
    # overridden path helpers, not the stale module-level ones.
    importlib.reload(p)
    importlib.reload(r)
    return r
def _write_installed_app(apps_dir: Path, name: str = "fileshare", **overrides):
    """Create a minimal installed-app directory under *apps_dir*.

    Writes a ``manifest.json`` (with *overrides* merged over the defaults, so
    tests can add keys like ``requires`` or replace ``version``) and an empty
    ``docker-compose.yaml``. Returns the app directory path.
    """
    app = apps_dir / name
    app.mkdir()
    manifest = {
        "name": name,
        "display_name": "Fileshare",
        "version": "0.1.0",
        "description": "Test fixture",
        "volumes": ["files"],
        "ports": [445],
        "icon": "icon.svg",
        **overrides,
    }
    (app / "manifest.json").write_text(json.dumps(manifest))
    (app / "docker-compose.yaml").write_text("services: {}\n")
    return app
def test_write_and_read_state_round_trip(runner):
    """write_state followed by read_state preserves stage, app, and timestamp."""
    runner.write_state("pulling_image", app="jellyfin")
    state = runner.read_state()
    assert state["stage"] == "pulling_image"
    assert state["app"] == "jellyfin"
    assert "updated_at" in state
def test_read_state_returns_empty_when_missing(runner):
    """A missing state file reads as an empty dict, not an error."""
    assert runner.read_state() == {}
def test_read_state_returns_empty_on_junk(runner):
    """Corrupt JSON in the state file degrades to an empty dict."""
    state_file = runner.state_path()
    state_file.parent.mkdir(parents=True, exist_ok=True)
    state_file.write_text("{not json")
    assert runner.read_state() == {}
def test_acquire_lock_prevents_concurrent_runs(runner):
    """While the lock is held, a second acquire raises InstallRunnerError."""
    first = runner.acquire_lock()
    try:
        with pytest.raises(runner.InstallRunnerError, match="in progress"):
            runner.acquire_lock()
    finally:
        first.close()
def test_run_install_happy_path(runner, monkeypatch):
    """Phases run in order (pull -> volumes -> up) and the final state is done."""
    import furtka.dockerops as dockerops
    from furtka.paths import apps_dir

    _write_installed_app(apps_dir(), "fileshare")
    seen = []
    monkeypatch.setattr(dockerops, "compose_pull", lambda *a, **k: seen.append(("pull", a)))
    monkeypatch.setattr(dockerops, "ensure_volume", lambda name: seen.append(("vol", name)))
    monkeypatch.setattr(dockerops, "compose_up", lambda *a, **k: seen.append(("up", a)))
    runner.run_install("fileshare")
    # Pull must complete before volumes, volumes before up.
    assert [phase for phase, _ in seen] == ["pull", "vol", "up"]
    # The single declared volume is created under the furtka_<app>_ namespace.
    assert seen[1] == ("vol", "furtka_fileshare_files")
    state = runner.read_state()
    assert state["stage"] == "done"
    assert state["app"] == "fileshare"
    assert state["version"] == "0.1.0"
def test_run_install_writes_error_on_pull_failure(runner, monkeypatch):
    """A DockerError during compose_pull re-raises after recording stage=error."""
    import furtka.dockerops as dockerops
    from furtka.paths import apps_dir

    _write_installed_app(apps_dir(), "fileshare")

    def fail_pull(*a, **k):
        raise dockerops.DockerError("pull failed: registry unreachable")

    monkeypatch.setattr(dockerops, "compose_pull", fail_pull)
    monkeypatch.setattr(dockerops, "ensure_volume", lambda name: None)
    monkeypatch.setattr(dockerops, "compose_up", lambda *a, **k: None)
    with pytest.raises(dockerops.DockerError):
        runner.run_install("fileshare")
    state = runner.read_state()
    assert state["stage"] == "error"
    assert state["app"] == "fileshare"
    assert "registry unreachable" in state["error"]
def test_run_install_writes_error_on_up_failure(runner, monkeypatch):
    """A DockerError during compose_up re-raises after recording stage=error."""
    import furtka.dockerops as dockerops
    from furtka.paths import apps_dir

    _write_installed_app(apps_dir(), "fileshare")
    monkeypatch.setattr(dockerops, "compose_pull", lambda *a, **k: None)
    monkeypatch.setattr(dockerops, "ensure_volume", lambda name: None)

    def fail_up(*a, **k):
        raise dockerops.DockerError("compose up: container refused to start")

    monkeypatch.setattr(dockerops, "compose_up", fail_up)
    with pytest.raises(dockerops.DockerError):
        runner.run_install("fileshare")
    state = runner.read_state()
    assert state["stage"] == "error"
    assert "refused to start" in state["error"]
def test_run_install_releases_lock_after_done(runner, monkeypatch):
    """After a successful run the lock can be re-acquired immediately."""
    import furtka.dockerops as dockerops
    from furtka.paths import apps_dir

    _write_installed_app(apps_dir(), "fileshare")
    monkeypatch.setattr(dockerops, "compose_pull", lambda *a, **k: None)
    monkeypatch.setattr(dockerops, "ensure_volume", lambda name: None)
    monkeypatch.setattr(dockerops, "compose_up", lambda *a, **k: None)
    runner.run_install("fileshare")
    # A fresh acquire succeeding proves the run released the lock.
    handle = runner.acquire_lock()
    handle.close()
def test_run_install_releases_lock_after_error(runner, monkeypatch):
    """The lock is released even when the run dies on the first phase."""
    import furtka.dockerops as dockerops
    from furtka.paths import apps_dir

    _write_installed_app(apps_dir(), "fileshare")

    def fail_pull(*a, **k):
        raise dockerops.DockerError("x")

    monkeypatch.setattr(dockerops, "compose_pull", fail_pull)
    with pytest.raises(dockerops.DockerError):
        runner.run_install("fileshare")
    handle = runner.acquire_lock()
    handle.close()
# --- plan-aware multi-app installs -------------------------------------------
def _write_plan(plan_path: Path, target: str, to_install: list[str]) -> None:
    """Drop a plan file naming the install target and the ordered app list."""
    payload = {"target": target, "to_install": to_install}
    plan_path.write_text(json.dumps(payload))
def _stub_docker_ops(monkeypatch, calls: list):
    """Replace the three docker-facing ops with recorders appending to *calls*.

    Each stub appends a ("pull" | "vol" | "up", project-or-volume-name) tuple.
    """
    import furtka.dockerops as dockerops

    monkeypatch.setattr(
        dockerops, "compose_pull", lambda app_dir, project: calls.append(("pull", project))
    )
    monkeypatch.setattr(dockerops, "ensure_volume", lambda name: calls.append(("vol", name)))
    monkeypatch.setattr(
        dockerops, "compose_up", lambda app_dir, project: calls.append(("up", project))
    )
def test_run_install_iterates_plan_order(runner, monkeypatch):
    """With a plan file present, each app is fully reconciled in plan order."""
    from furtka.paths import apps_dir

    _write_installed_app(apps_dir(), "mosquitto")
    _write_installed_app(apps_dir(), "zigbee2mqtt", requires=[{"app": "mosquitto"}])
    _write_plan(runner.plan_path(), "zigbee2mqtt", ["mosquitto", "zigbee2mqtt"])
    calls: list = []
    _stub_docker_ops(monkeypatch, calls)
    runner.run_install("zigbee2mqtt")
    # mosquitto is fully reconciled before zigbee2mqtt starts.
    pulls = [c for c in calls if c[0] == "pull"]
    ups = [c for c in calls if c[0] == "up"]
    assert pulls == [("pull", "mosquitto"), ("pull", "zigbee2mqtt")]
    assert ups == [("up", "mosquitto"), ("up", "zigbee2mqtt")]
    state = runner.read_state()
    assert state["stage"] == "done"
    assert state["target"] == "zigbee2mqtt"
    assert state["app"] == "zigbee2mqtt"
def test_run_install_fires_on_install_hook_against_provider(runner, monkeypatch):
    """The provider's on_install hook runs against the provider, and its
    KEY=VALUE output lands in the consumer's .env with mode 0600."""
    import furtka.dockerops as dockerops
    from furtka.paths import apps_dir

    provider = _write_installed_app(apps_dir(), "mosquitto")
    # Provider ships a hook script.
    (provider / "hooks").mkdir()
    hook = provider / "hooks" / "create-user.sh"
    hook.write_bytes(b"#!/bin/sh\necho MQTT_USER=z2m\necho MQTT_PASS=hunter2\n")
    consumer = _write_installed_app(
        apps_dir(),
        "zigbee2mqtt",
        requires=[{"app": "mosquitto", "on_install": "hooks/create-user.sh"}],
    )
    # Consumer's .env starts empty.
    (consumer / ".env").write_text("")
    _write_plan(runner.plan_path(), "zigbee2mqtt", ["mosquitto", "zigbee2mqtt"])
    _stub_docker_ops(monkeypatch, [])
    seen = {}

    def record_exec(app_dir, project, service, script_path, *, env, timeout):
        seen["app_dir"] = app_dir
        seen["project"] = project
        seen["service"] = service
        seen["script_path"] = script_path
        seen["env"] = env
        seen["timeout"] = timeout
        return "MQTT_USER=z2m\nMQTT_PASS=hunter2\n"

    # Deterministic service choice for the provider_exec_service helper.
    monkeypatch.setattr(
        dockerops, "compose_image_tags", lambda a, p: {"mosquitto": "eclipse-mosquitto:2"}
    )
    monkeypatch.setattr(dockerops, "compose_exec_script", record_exec)
    runner.run_install("zigbee2mqtt")
    # Hook targeted the provider, with the consumer's identity in env and
    # the expected timeout.
    assert seen["project"] == "mosquitto"
    assert seen["service"] == "mosquitto"
    assert seen["script_path"] == hook
    assert seen["env"] == {
        "FURTKA_CONSUMER_APP": "zigbee2mqtt",
        "FURTKA_CONSUMER_VERSION": "0.1.0",
    }
    assert seen["timeout"] == 60.0
    # Hook output was merged into the consumer's .env.
    env_text = (consumer / ".env").read_text()
    assert "MQTT_USER=z2m" in env_text
    assert "MQTT_PASS=hunter2" in env_text
    # Secrets file must not be group/world readable.
    assert (consumer / ".env").stat().st_mode & 0o777 == 0o600
def test_run_install_hook_furtka_json_sentinel(runner, monkeypatch):
    """FURTKA_JSON lines overlay plain KEY=VALUE lines on key conflicts."""
    import furtka.dockerops as dockerops
    from furtka.paths import apps_dir

    provider = _write_installed_app(apps_dir(), "mosquitto")
    consumer = _write_installed_app(
        apps_dir(),
        "z2m",
        requires=[{"app": "mosquitto", "on_install": "hooks/x.sh"}],
    )
    (provider / "hooks").mkdir()
    (provider / "hooks" / "x.sh").write_bytes(b"")
    (consumer / ".env").write_text("")
    _write_plan(runner.plan_path(), "z2m", ["mosquitto", "z2m"])
    _stub_docker_ops(monkeypatch, [])
    monkeypatch.setattr(dockerops, "compose_image_tags", lambda a, p: {"mosquitto": "img"})
    # Hook output mixes plain KEY=VALUE with a FURTKA_JSON sentinel line.
    hook_output = 'MQTT_USER=oldval\nFURTKA_JSON: {"MQTT_USER": "newval", "TOKEN": "abc"}\n'
    monkeypatch.setattr(dockerops, "compose_exec_script", lambda *a, **k: hook_output)
    runner.run_install("z2m")
    env_text = (consumer / ".env").read_text()
    assert "MQTT_USER=newval" in env_text  # JSON overlay wins
    assert "TOKEN=abc" in env_text
def test_run_install_hook_rejects_bad_key_name(runner, monkeypatch):
    """A hook emitting a non-UPPER_SNAKE_CASE key aborts before the consumer starts."""
    import furtka.dockerops as dockerops
    from furtka.paths import apps_dir

    provider = _write_installed_app(apps_dir(), "mosquitto")
    consumer = _write_installed_app(
        apps_dir(),
        "z2m",
        requires=[{"app": "mosquitto", "on_install": "hooks/x.sh"}],
    )
    (provider / "hooks").mkdir()
    (provider / "hooks" / "x.sh").write_bytes(b"")
    (consumer / ".env").write_text("")
    _write_plan(runner.plan_path(), "z2m", ["mosquitto", "z2m"])
    calls: list = []
    _stub_docker_ops(monkeypatch, calls)
    monkeypatch.setattr(dockerops, "compose_image_tags", lambda a, p: {"mosquitto": "img"})
    monkeypatch.setattr(dockerops, "compose_exec_script", lambda *a, **k: "lowercase_key=oops\n")
    with pytest.raises(runner.InstallRunnerError, match="UPPER_SNAKE_CASE"):
        runner.run_install("z2m")
    assert runner.read_state()["stage"] == "error"
    # The hook failure means the consumer's compose_up never ran.
    assert ("up", "z2m") not in calls
def test_run_install_hook_rejects_placeholder_value(runner, monkeypatch):
    """A hook emitting a placeholder secret value aborts the install."""
    import furtka.dockerops as dockerops
    from furtka.paths import apps_dir

    provider = _write_installed_app(apps_dir(), "mosquitto")
    consumer = _write_installed_app(
        apps_dir(),
        "z2m",
        requires=[{"app": "mosquitto", "on_install": "hooks/x.sh"}],
    )
    (provider / "hooks").mkdir()
    (provider / "hooks" / "x.sh").write_bytes(b"")
    (consumer / ".env").write_text("")
    _write_plan(runner.plan_path(), "z2m", ["mosquitto", "z2m"])
    _stub_docker_ops(monkeypatch, [])
    monkeypatch.setattr(dockerops, "compose_image_tags", lambda a, p: {"mosquitto": "img"})
    monkeypatch.setattr(dockerops, "compose_exec_script", lambda *a, **k: "MQTT_PASS=changeme\n")
    with pytest.raises(runner.InstallRunnerError, match="placeholder"):
        runner.run_install("z2m")
def test_run_install_hook_failure_skips_consumer_compose_up(runner, monkeypatch):
    """A failing hook leaves the provider up but never starts the consumer."""
    import furtka.dockerops as dockerops
    from furtka.paths import apps_dir

    provider = _write_installed_app(apps_dir(), "mosquitto")
    consumer = _write_installed_app(
        apps_dir(),
        "z2m",
        requires=[{"app": "mosquitto", "on_install": "hooks/x.sh"}],
    )
    (provider / "hooks").mkdir()
    (provider / "hooks" / "x.sh").write_bytes(b"")
    (consumer / ".env").write_text("")
    _write_plan(runner.plan_path(), "z2m", ["mosquitto", "z2m"])
    calls: list = []
    _stub_docker_ops(monkeypatch, calls)
    monkeypatch.setattr(dockerops, "compose_image_tags", lambda a, p: {"mosquitto": "img"})

    def fail_exec(*a, **k):
        raise dockerops.DockerError("hook returned 1: connection refused")

    monkeypatch.setattr(dockerops, "compose_exec_script", fail_exec)
    with pytest.raises(dockerops.DockerError):
        runner.run_install("z2m")
    state = runner.read_state()
    assert state["stage"] == "error"
    assert state["target"] == "z2m"
    # The provider was reconciled earlier in the plan; the consumer never was.
    assert ("up", "mosquitto") in calls
    assert ("up", "z2m") not in calls
def test_run_install_missing_provider_hook_file_raises(runner, monkeypatch):
    """Referencing a hook script the provider does not ship is an error."""
    import furtka.dockerops as dockerops
    from furtka.paths import apps_dir

    _write_installed_app(apps_dir(), "mosquitto")
    consumer = _write_installed_app(
        apps_dir(),
        "z2m",
        requires=[{"app": "mosquitto", "on_install": "hooks/missing.sh"}],
    )
    (consumer / ".env").write_text("")
    _write_plan(runner.plan_path(), "z2m", ["mosquitto", "z2m"])
    _stub_docker_ops(monkeypatch, [])
    monkeypatch.setattr(dockerops, "compose_image_tags", lambda a, p: {"mosquitto": "img"})
    with pytest.raises(runner.InstallRunnerError, match="missing in provider"):
        runner.run_install("z2m")
def test_run_install_plan_file_is_consumed_after_read(runner, monkeypatch):
    """After a run, the plan file is removed so a stale plan can't steer the next run."""
    from furtka.paths import apps_dir

    _write_installed_app(apps_dir(), "fileshare")
    _write_plan(runner.plan_path(), "fileshare", ["fileshare"])
    _stub_docker_ops(monkeypatch, [])
    runner.run_install("fileshare")
    assert not runner.plan_path().exists()
# --- _parse_hook_output (unit) -----------------------------------------------
def test_parse_hook_output_kv_only(runner):
    """Plain KEY=VALUE lines parse into a dict."""
    parsed = runner._parse_hook_output("MQTT_USER=z2m\nMQTT_PASS=hunter2\n")
    assert parsed == {"MQTT_USER": "z2m", "MQTT_PASS": "hunter2"}
def test_parse_hook_output_rejects_lowercase_key(runner):
    """Keys must be UPPER_SNAKE_CASE; anything else raises."""
    with pytest.raises(runner.InstallRunnerError, match="UPPER_SNAKE_CASE"):
        runner._parse_hook_output("lowercase=oops\n")
def test_parse_hook_output_furtka_json(runner):
    """A FURTKA_JSON sentinel line parses into its object's key/value pairs."""
    parsed = runner._parse_hook_output('FURTKA_JSON: {"FOO": "bar", "BAZ": "qux"}\n')
    assert parsed == {"FOO": "bar", "BAZ": "qux"}
def test_parse_hook_output_furtka_json_rejects_non_string(runner):
    """Non-string values inside FURTKA_JSON are rejected."""
    with pytest.raises(runner.InstallRunnerError, match="must be a string"):
        runner._parse_hook_output('FURTKA_JSON: {"FOO": 42}\n')
def test_parse_hook_output_furtka_json_rejects_bad_payload(runner):
    """A FURTKA_JSON payload that is not a JSON object is rejected."""
    with pytest.raises(runner.InstallRunnerError, match="must be an object"):
        runner._parse_hook_output('FURTKA_JSON: ["not", "a", "dict"]\n')
def test_parse_hook_output_furtka_json_invalid_json(runner):
    """Malformed JSON after the FURTKA_JSON sentinel is rejected."""
    with pytest.raises(runner.InstallRunnerError, match="invalid FURTKA_JSON"):
        runner._parse_hook_output("FURTKA_JSON: {not json}\n")