From 59e71025c7b34d1082630d0b8a51fff4d368fe78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc=20Schottst=C3=A4dt?= Date: Sat, 9 May 2026 20:30:29 +0200 Subject: [PATCH 1/6] feat: refactor runtime for container backends --- .docker/druid-install-command.sh | 4 +- .docker/entrypoint.sh | 110 +- .dockerignore | 8 + .github/workflows/build.yml | 16 +- .github/workflows/docker-builds.yml | 168 +- .github/workflows/pr.yml | 10 +- .gitignore | 5 +- .vscode/launch.json | 331 +++- CONTEXT.md | 340 ++++ Dockerfile | 3 +- Dockerfile.coldstarter | 10 + Dockerfile.nix | 15 - Makefile | 24 +- README.md | 52 +- api/oapi-codegen.yaml | 1 + api/openapi.yaml | 1008 ++--------- apps/druid-client/adapters/cli/attach.go | 17 + apps/druid-client/adapters/cli/create.go | 110 ++ apps/druid-client/adapters/cli/delete.go | 22 + apps/druid-client/adapters/cli/describe.go | 22 + apps/druid-client/adapters/cli/list.go | 22 + apps/druid-client/adapters/cli/login.go | 71 + apps/druid-client/adapters/cli/output.go | 28 + apps/druid-client/adapters/cli/ports.go | 22 + apps/druid-client/adapters/cli/pull.go | 36 + apps/druid-client/adapters/cli/push.go | 132 ++ .../adapters/cli/push_category.go | 48 + apps/druid-client/adapters/cli/register.go | 48 + apps/druid-client/adapters/cli/root.go | 91 + apps/druid-client/adapters/cli/root_test.go | 52 + apps/druid-client/adapters/cli/run.go | 22 + .../adapters/daemon/openapi_client.go | 134 ++ .../adapters/websocket/attacher.go | 97 + .../druid-client/core/ports/runtime_daemon.go | 20 + .../core/services/runtime_service.go | 40 + apps/druid-client/main.go | 15 + apps/druid-coldstarter/adapters/cli/root.go | 39 + .../adapters/filesystem/status_writer.go | 46 + .../core/ports/status_writer.go | 7 + .../core/services/coldstarter.go | 94 + apps/druid-coldstarter/main.go | 15 + .../druid/adapters/cli/app_version.go | 6 +- apps/druid/adapters/cli/output.go | 15 + apps/druid/adapters/cli/root.go | 69 + apps/druid/adapters/cli/root_test.go | 28 + 
apps/druid/adapters/cli/serve.go | 97 + {cmd => apps/druid/adapters/cli}/update.go | 40 +- .../druid/adapters/cli/validate.go | 8 +- {cmd => apps/druid/adapters/cli}/version.go | 2 +- .../adapters/http/handlers/health_handler.go | 16 + apps/druid/adapters/http/handlers/routes.go | 27 + .../adapters/http/handlers/scroll_handler.go | 115 ++ .../http/handlers/websocket_handler.go | 69 + .../druid/core/services/runtime_controller.go | 539 ++++++ .../core/services/runtime_controller_test.go | 284 +++ main.go => apps/druid/main.go | 8 +- cmd/coldstarter.go | 44 - cmd/port_monitor.go | 46 - cmd/registry.go | 21 - cmd/registry_login.go | 70 - cmd/registry_pull.go | 34 - cmd/registry_push.go | 110 -- cmd/registry_push_category.go | 53 - cmd/root.go | 94 - cmd/run.go | 76 - cmd/scroll.go | 19 - cmd/serve.go | 471 ----- cmd/server/web/middlewares/auth.go | 59 - cmd/server/web/middlewares/header.go | 13 - cmd/server/web/middlewares/validation.go | 125 -- cmd/server/web/server.go | 244 --- docs_md/main.go | 52 +- examples/README.md | 32 + examples/container-lab/scroll.yaml | 143 ++ examples/jobs/scroll.yaml | 45 + examples/minecraft/json.lua | 388 ---- examples/minecraft/packet_handler/json.lua | 388 ---- .../minecraft/packet_handler/minecraft.lua | 262 --- examples/minecraft/packet_handler/query.lua | 187 -- examples/minecraft/scroll.yaml | 107 +- examples/mysql/scroll.yaml | 47 + examples/nginx/scroll-lock.json | 5 - examples/nginx/scroll.yaml | 20 - examples/scroll-cwd-pull/annotations.json | 1 - examples/scroll-cwd-pull/manifest.json | 1 - .../scroll-cwd-pull/packet_handler/json.lua | 388 ---- .../packet_handler/minecraft.lua | 262 --- examples/scroll-cwd-pull/scroll.yaml | 72 - examples/scroll-cwd-pull/update/.gitkeep | 0 examples/scroll-cwd/annotations.json | 1 - examples/scroll-cwd/manifest.json | 1 - examples/scroll-cwd/packet_handler/json.lua | 388 ---- .../scroll-cwd/packet_handler/minecraft.lua | 262 --- examples/scroll-cwd/scroll.yaml | 72 - 
examples/scroll-cwd/update/.gitkeep | 0 examples/static-web/scroll.yaml | 44 + go.mod | 142 +- go.sum | 426 ++--- internal/api/generated.go | 1562 ++++++++++------- internal/api/generated_test.go | 12 + internal/core/domain/command_status.go | 33 + internal/core/domain/console.go | 7 +- internal/core/domain/log.go | 10 + internal/core/domain/process.go | 90 - internal/core/domain/queue_item.go | 1 - internal/core/domain/runtime.go | 40 + internal/core/domain/runtime_scroll.go | 31 + internal/core/domain/scroll.go | 276 ++- internal/core/domain/scroll_lock.go | 76 - internal/core/domain/scroll_test.go | 124 ++ internal/core/ports/handler_ports.go | 66 - internal/core/ports/services_ports.go | 70 +- internal/core/services/coldstarter.go | 5 +- .../core/services/nix_dependency_service.go | 34 - .../services/nix_dependency_service_test.go | 27 - internal/core/services/plugin_manager.go | 196 --- internal/core/services/port_service.go | 338 +--- internal/core/services/port_service_test.go | 240 --- internal/core/services/procedure_launcher.go | 292 +-- .../core/services/procedure_launcher_test.go | 52 + internal/core/services/process_manager.go | 309 ---- .../core/services/process_manager_env_test.go | 27 - .../core/services/process_manager_test.go | 48 - internal/core/services/process_monitor.go | 299 ---- internal/core/services/queue_manager.go | 214 +-- internal/core/services/queue_manager_test.go | 324 ++-- internal/core/services/registry/oci.go | 6 + internal/core/services/registry/oci_test.go | 2 +- .../core/services/runtime_scroll_manager.go | 249 +++ .../services/runtime_scroll_manager_test.go | 55 + internal/core/services/runtime_state_store.go | 338 ++++ .../core/services/runtime_state_store_test.go | 135 ++ internal/core/services/scroll_service.go | 178 +- internal/core/services/scroll_service_test.go | 92 - internal/core/services/template_renderer.go | 72 - internal/handler/annotation_handler.go | 21 - internal/handler/annotation_handler_test.go | 159 -- 
internal/handler/coldstarter_handler.go | 22 - internal/handler/coldstarter_handler_test.go | 94 - internal/handler/daemon_handler.go | 22 - internal/handler/health_handler.go | 77 - internal/handler/health_handler_test.go | 197 --- internal/handler/port_handler.go | 68 - internal/handler/port_handler_test.go | 437 ----- internal/handler/process_handler.go | 35 - internal/handler/process_handler_test.go | 163 -- internal/handler/queue_hander.go | 18 - internal/handler/queue_handler_test.go | 152 -- internal/handler/scroll_handler.go | 148 -- internal/handler/scroll_handler_test.go | 677 ------- internal/handler/scroll_log_handler.go | 82 - internal/handler/scroll_metric_handler.go | 28 - .../handler/scroll_metric_handler_test.go | 231 --- internal/handler/watch_handler.go | 233 --- internal/handler/watch_handler_test.go | 362 ---- internal/handler/websocket_handler.go | 142 -- internal/handler/websocket_handler_test.go | 220 --- internal/runtime/backend.go | 36 + internal/runtime/docker/backend.go | 689 ++++++++ internal/runtime/kubernetes/backend.go | 669 +++++++ internal/runtime/kubernetes/config.go | 84 + internal/runtime/kubernetes/config_test.go | 124 ++ internal/runtime/kubernetes/hubble.go | 100 ++ internal/runtime/kubernetes/names.go | 99 ++ internal/runtime/kubernetes/resources.go | 277 +++ internal/runtime/kubernetes/resources_test.go | 235 +++ internal/runtime/kubernetes/state_store.go | 236 +++ .../runtime/kubernetes/state_store_test.go | 115 ++ internal/runtime/kubernetes/wait.go | 79 + internal/runtime/kubernetes/wait_test.go | 61 + internal/runtime/runtime_test.go | 109 ++ internal/signals/process_shutdown.go | 158 -- internal/utils/artifact.go | 7 +- internal/utils/artifact_test.go | 7 + internal/utils/logger/logger.go | 1 - internal/utils/logger/plugin_logger.go | 126 -- internal/utils/runtime_socket.go | 22 + plugin/config.go | 23 - plugin/environment.go | 22 - plugin/grpc.go | 115 -- plugin/interface.go | 43 - plugin/proto/daemon_service.pb.go | 
214 --- plugin/proto/daemon_service.proto | 15 - plugin/proto/daemon_service_grpc.pb.go | 105 -- plugin/proto/plugin_service.pb.go | 526 ------ plugin/proto/plugin_service.proto | 37 - plugin/proto/plugin_service_grpc.pb.go | 177 -- plugin/rcon/rcon.go | 201 --- plugin/rcon_web_rust/rcon_web_rust.go | 182 -- plugin/runDebug.sh | 14 - scripts/build_coldstarter_image.sh | 16 + scripts/validate_all_scrolls.sh | 9 +- .../commands/serve_coldstarter_test.go | 181 -- test/integration/commands/serve_idle_test.go | 89 - test/integration/commands/serve_test.go | 201 --- .../commands/serve_watch_ports_test.go | 108 -- test/integration/example_test.go | 72 +- test/mock/services.go | 594 ++----- test/utils/daemon_http_api.go | 55 - test/utils/setup_serve_commands.go | 79 - test/utils/utils.go | 15 - 201 files changed, 9916 insertions(+), 15822 deletions(-) create mode 100644 CONTEXT.md create mode 100644 Dockerfile.coldstarter delete mode 100644 Dockerfile.nix create mode 100644 apps/druid-client/adapters/cli/attach.go create mode 100644 apps/druid-client/adapters/cli/create.go create mode 100644 apps/druid-client/adapters/cli/delete.go create mode 100644 apps/druid-client/adapters/cli/describe.go create mode 100644 apps/druid-client/adapters/cli/list.go create mode 100644 apps/druid-client/adapters/cli/login.go create mode 100644 apps/druid-client/adapters/cli/output.go create mode 100644 apps/druid-client/adapters/cli/ports.go create mode 100644 apps/druid-client/adapters/cli/pull.go create mode 100644 apps/druid-client/adapters/cli/push.go create mode 100644 apps/druid-client/adapters/cli/push_category.go create mode 100644 apps/druid-client/adapters/cli/register.go create mode 100644 apps/druid-client/adapters/cli/root.go create mode 100644 apps/druid-client/adapters/cli/root_test.go create mode 100644 apps/druid-client/adapters/cli/run.go create mode 100644 apps/druid-client/adapters/daemon/openapi_client.go create mode 100644 
apps/druid-client/adapters/websocket/attacher.go create mode 100644 apps/druid-client/core/ports/runtime_daemon.go create mode 100644 apps/druid-client/core/services/runtime_service.go create mode 100644 apps/druid-client/main.go create mode 100644 apps/druid-coldstarter/adapters/cli/root.go create mode 100644 apps/druid-coldstarter/adapters/filesystem/status_writer.go create mode 100644 apps/druid-coldstarter/core/ports/status_writer.go create mode 100644 apps/druid-coldstarter/core/services/coldstarter.go create mode 100644 apps/druid-coldstarter/main.go rename cmd/semver.go => apps/druid/adapters/cli/app_version.go (96%) create mode 100644 apps/druid/adapters/cli/output.go create mode 100644 apps/druid/adapters/cli/root.go create mode 100644 apps/druid/adapters/cli/root_test.go create mode 100644 apps/druid/adapters/cli/serve.go rename {cmd => apps/druid/adapters/cli}/update.go (66%) rename cmd/scroll_validate.go => apps/druid/adapters/cli/validate.go (81%) rename {cmd => apps/druid/adapters/cli}/version.go (96%) create mode 100644 apps/druid/adapters/http/handlers/health_handler.go create mode 100644 apps/druid/adapters/http/handlers/routes.go create mode 100644 apps/druid/adapters/http/handlers/scroll_handler.go create mode 100644 apps/druid/adapters/http/handlers/websocket_handler.go create mode 100644 apps/druid/core/services/runtime_controller.go create mode 100644 apps/druid/core/services/runtime_controller_test.go rename main.go => apps/druid/main.go (54%) delete mode 100644 cmd/coldstarter.go delete mode 100644 cmd/port_monitor.go delete mode 100644 cmd/registry.go delete mode 100644 cmd/registry_login.go delete mode 100644 cmd/registry_pull.go delete mode 100644 cmd/registry_push.go delete mode 100644 cmd/registry_push_category.go delete mode 100644 cmd/root.go delete mode 100644 cmd/run.go delete mode 100644 cmd/scroll.go delete mode 100644 cmd/serve.go delete mode 100644 cmd/server/web/middlewares/auth.go delete mode 100644 
cmd/server/web/middlewares/header.go delete mode 100644 cmd/server/web/middlewares/validation.go delete mode 100644 cmd/server/web/server.go create mode 100644 examples/README.md create mode 100644 examples/container-lab/scroll.yaml create mode 100644 examples/jobs/scroll.yaml delete mode 100644 examples/minecraft/json.lua delete mode 100644 examples/minecraft/packet_handler/json.lua delete mode 100644 examples/minecraft/packet_handler/minecraft.lua delete mode 100644 examples/minecraft/packet_handler/query.lua create mode 100644 examples/mysql/scroll.yaml delete mode 100644 examples/nginx/scroll-lock.json delete mode 100644 examples/nginx/scroll.yaml delete mode 100644 examples/scroll-cwd-pull/annotations.json delete mode 100644 examples/scroll-cwd-pull/manifest.json delete mode 100644 examples/scroll-cwd-pull/packet_handler/json.lua delete mode 100644 examples/scroll-cwd-pull/packet_handler/minecraft.lua delete mode 100644 examples/scroll-cwd-pull/scroll.yaml delete mode 100644 examples/scroll-cwd-pull/update/.gitkeep delete mode 100644 examples/scroll-cwd/annotations.json delete mode 100644 examples/scroll-cwd/manifest.json delete mode 100644 examples/scroll-cwd/packet_handler/json.lua delete mode 100644 examples/scroll-cwd/packet_handler/minecraft.lua delete mode 100644 examples/scroll-cwd/scroll.yaml delete mode 100644 examples/scroll-cwd/update/.gitkeep create mode 100644 examples/static-web/scroll.yaml create mode 100644 internal/api/generated_test.go create mode 100644 internal/core/domain/command_status.go create mode 100644 internal/core/domain/log.go delete mode 100644 internal/core/domain/process.go create mode 100644 internal/core/domain/runtime.go create mode 100644 internal/core/domain/runtime_scroll.go delete mode 100644 internal/core/domain/scroll_lock.go create mode 100644 internal/core/domain/scroll_test.go delete mode 100644 internal/core/ports/handler_ports.go delete mode 100644 internal/core/services/nix_dependency_service.go delete mode 
100644 internal/core/services/nix_dependency_service_test.go delete mode 100644 internal/core/services/plugin_manager.go delete mode 100644 internal/core/services/port_service_test.go create mode 100644 internal/core/services/procedure_launcher_test.go delete mode 100644 internal/core/services/process_manager.go delete mode 100644 internal/core/services/process_manager_env_test.go delete mode 100644 internal/core/services/process_manager_test.go delete mode 100644 internal/core/services/process_monitor.go create mode 100644 internal/core/services/runtime_scroll_manager.go create mode 100644 internal/core/services/runtime_scroll_manager_test.go create mode 100644 internal/core/services/runtime_state_store.go create mode 100644 internal/core/services/runtime_state_store_test.go delete mode 100644 internal/core/services/scroll_service_test.go delete mode 100644 internal/core/services/template_renderer.go delete mode 100644 internal/handler/annotation_handler.go delete mode 100644 internal/handler/annotation_handler_test.go delete mode 100644 internal/handler/coldstarter_handler.go delete mode 100644 internal/handler/coldstarter_handler_test.go delete mode 100644 internal/handler/daemon_handler.go delete mode 100644 internal/handler/health_handler.go delete mode 100644 internal/handler/health_handler_test.go delete mode 100644 internal/handler/port_handler.go delete mode 100644 internal/handler/port_handler_test.go delete mode 100644 internal/handler/process_handler.go delete mode 100644 internal/handler/process_handler_test.go delete mode 100644 internal/handler/queue_hander.go delete mode 100644 internal/handler/queue_handler_test.go delete mode 100644 internal/handler/scroll_handler.go delete mode 100644 internal/handler/scroll_handler_test.go delete mode 100644 internal/handler/scroll_log_handler.go delete mode 100644 internal/handler/scroll_metric_handler.go delete mode 100644 internal/handler/scroll_metric_handler_test.go delete mode 100644 
internal/handler/watch_handler.go delete mode 100644 internal/handler/watch_handler_test.go delete mode 100644 internal/handler/websocket_handler.go delete mode 100644 internal/handler/websocket_handler_test.go create mode 100644 internal/runtime/backend.go create mode 100644 internal/runtime/docker/backend.go create mode 100644 internal/runtime/kubernetes/backend.go create mode 100644 internal/runtime/kubernetes/config.go create mode 100644 internal/runtime/kubernetes/config_test.go create mode 100644 internal/runtime/kubernetes/hubble.go create mode 100644 internal/runtime/kubernetes/names.go create mode 100644 internal/runtime/kubernetes/resources.go create mode 100644 internal/runtime/kubernetes/resources_test.go create mode 100644 internal/runtime/kubernetes/state_store.go create mode 100644 internal/runtime/kubernetes/state_store_test.go create mode 100644 internal/runtime/kubernetes/wait.go create mode 100644 internal/runtime/kubernetes/wait_test.go create mode 100644 internal/runtime/runtime_test.go delete mode 100644 internal/signals/process_shutdown.go delete mode 100644 internal/utils/logger/plugin_logger.go create mode 100644 internal/utils/runtime_socket.go delete mode 100644 plugin/config.go delete mode 100644 plugin/environment.go delete mode 100644 plugin/grpc.go delete mode 100644 plugin/interface.go delete mode 100644 plugin/proto/daemon_service.pb.go delete mode 100644 plugin/proto/daemon_service.proto delete mode 100644 plugin/proto/daemon_service_grpc.pb.go delete mode 100644 plugin/proto/plugin_service.pb.go delete mode 100644 plugin/proto/plugin_service.proto delete mode 100644 plugin/proto/plugin_service_grpc.pb.go delete mode 100644 plugin/rcon/rcon.go delete mode 100644 plugin/rcon_web_rust/rcon_web_rust.go delete mode 100644 plugin/runDebug.sh create mode 100755 scripts/build_coldstarter_image.sh delete mode 100644 test/integration/commands/serve_coldstarter_test.go delete mode 100644 test/integration/commands/serve_idle_test.go delete 
mode 100644 test/integration/commands/serve_test.go delete mode 100644 test/integration/commands/serve_watch_ports_test.go delete mode 100644 test/utils/daemon_http_api.go delete mode 100644 test/utils/setup_serve_commands.go diff --git a/.docker/druid-install-command.sh b/.docker/druid-install-command.sh index a92a4c5b..254da445 100644 --- a/.docker/druid-install-command.sh +++ b/.docker/druid-install-command.sh @@ -10,10 +10,8 @@ fi BASEDIR=$(dirname "$0") wget --show-progress -q -O $BASEDIR/druid https://github.com/highcard-dev/druid-cli/releases/$URL_PATH/druid -wget --show-progress -q -O $BASEDIR/druid_rcon https://github.com/highcard-dev/druid-cli/releases/$URL_PATH/druid_rcon -wget --show-progress -q -O $BASEDIR/druid_rcon_web_rust https://github.com/highcard-dev/druid-cli/releases/$URL_PATH/druid_rcon_web_rust wget --show-progress -q -O $BASEDIR/entrypoint.sh https://github.com/highcard-dev/druid-cli/releases/$URL_PATH/entrypoint.sh -chmod +x $BASEDIR/druid $BASEDIR/druid_rcon $BASEDIR/druid_rcon_web_rust +chmod +x $BASEDIR/druid # Modify the PATH variable to prioritize /app/resources export PATH=$BASEDIR:$PATH diff --git a/.docker/entrypoint.sh b/.docker/entrypoint.sh index 2e2aec71..376d0312 100755 --- a/.docker/entrypoint.sh +++ b/.docker/entrypoint.sh @@ -1,7 +1,6 @@ #!/usr/bin/env bash set -e -SD="./" input=$@ # Global args derived from envs that apply to multiple commands @@ -16,123 +15,26 @@ then global_args+=("--config=$DRUID_CONFIG") fi -# Migrate legacy .scroll layout: -# Before: .scroll/ + -# After: + data/ -if [ -d "${SD}.scroll" ]; then - echo "Migrating legacy .scroll layout..." - mkdir -p "${SD}data" - for item in "${SD}"* "${SD}".[!.]*; do - [ -e "$item" ] || continue - name=$(basename "$item") - [ "$name" != "data" ] && [ "$name" != ".scroll" ] && mv "$item" "${SD}data/" - done - mv "${SD}.scroll"/* "${SD}" - rm -rf "${SD}.scroll" - echo "Legacy migration complete" -fi - echo "Druid Version: $(druid version)" if [ ! 
-z "${DRUID_REGISTRY_HOST}" ] && [ ! -z "${DRUID_REGISTRY_USER}" ] && [ ! -z "${DRUID_REGISTRY_PASSWORD}" ]; then echo "Logging into registry ${DRUID_REGISTRY_HOST}" - druid registry login --host "${DRUID_REGISTRY_HOST}" -u "${DRUID_REGISTRY_USER}" -p "${DRUID_REGISTRY_PASSWORD}" -fi - -if [ "${ENSURE_NIX}" = "true" ]; -then - if [ ! -e "$HOME/.nix-profile" ]; - then - echo "Installing Nix package manager" - sh <(curl --proto '=https' --tlsv1.2 -L https://nixos.org/nix/install) --no-daemon - echo "Nix installed" - fi - nix-channel --update + druid login --host "${DRUID_REGISTRY_HOST}" -u "${DRUID_REGISTRY_USER}" -p "${DRUID_REGISTRY_PASSWORD}" fi -#Check if we should serve as default or when only artifact is specified -if [ -z "$input" ] || [[ $input =~ ([^/]+)/([^:]+):([^/]+) ]] && [[ $input != *" "* ]]; then artifact="${input}" - if [ -z "${artifact}" ]; - then - artifact=$DRUID_SCROLL_ARTIFACT - fi - - echo "Artifact: $artifact" - - #Update command - if [ "${DRUID_AUTO_UPDATE}" = "true" ] && [ -f "${SD}/scroll.yaml" ]; - then - echo "Updating artifact" - - # Build args for `druid update` so that global envs apply as well - update_args=(update "${global_args[@]}") - - druid "${update_args[@]}" - echo "Updated artifact" - fi - - - #ignore-version-check otherwise we have missmatch after update - args=(serve --ignore-version-check --additional-endpoints annotations) - - if [ ! -z "${artifact}" ]; - then - args+=($artifact) - fi - - # Map envs to args (--cwd = scroll dir, --config = path to .druid.yaml) - if [ ! -z "${DRUID_JWKS_SERVER}" ]; - then - args+=("--jwks-server" "${DRUID_JWKS_SERVER}") - fi - - if [ ! -z "${DRUID_USER_ID}" ]; - then - args+=("--user-id" "${DRUID_USER_ID}") - fi +# Serve as default when no command is provided. +if [ -z "$input" ]; then + args=(serve) if [ ! -z "${DRUID_PORT}" ]; then + args+=("--tcp") args+=("--port" "${DRUID_PORT}") fi - if [ ! -z "${DRUID_IDLE}" ]; - then - args+=("--idle=$DRUID_IDLE") - fi - if [ ! 
-z "${DRUID_WATCH_PORTS}" ]; - then - args+=("--watch-ports=$DRUID_WATCH_PORTS") - fi - - if [ ! -z "${DRUID_WATCH_PORTS_INTERFACES}" ]; - then - args+=("--watch-ports-interfaces" "${DRUID_WATCH_PORTS_INTERFACES}") - fi - - if [ ! -z "${DRUID_COLDSTARTER}" ]; - then - args+=("--coldstarter=$DRUID_COLDSTARTER") - fi - - if [ ! -z "${DRUID_INIT_SNAPSHOT_URL}" ]; - then - args+=("--init-snapshot-url=$DRUID_INIT_SNAPSHOT_URL") - fi - - if [ ! -z "${DRUID_SKIP_ARTIFACT_DOWNLOAD}" ]; - then - args+=("--skip-artifact-download") - fi - # Reuse global args (cwd/config) for serve as well args+=("${global_args[@]}") - - if [ ! -z "${PPROF_BIND}" ]; - then - args+=("--pprof=$PPROF_BIND") - fi echo "Running druid with args from env: ${args[@]}" exec druid "${args[@]}" @@ -156,4 +58,4 @@ else done exec druid "${args[@]}" -fi \ No newline at end of file +fi diff --git a/.dockerignore b/.dockerignore index b7652b72..1aea800c 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,2 +1,10 @@ examples __debug_bin* +.git +.runtime-state +bin +dist +tmp +*.db +*.sock +.DS_Store diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index d185644d..1a4b016d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -9,7 +9,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: - go-version: "^1.21" + go-version: "^1.24" - run: make test-integration-docker name: Run integration tests inside Docker - run: make test @@ -21,19 +21,17 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: - go-version: "^1.21" + go-version: "^1.24" - run: make build name: Build - - run: make build-plugins - name: Build plugins - uses: actions/upload-artifact@v4 name: Upload build artifacts with: name: build-artifacts path: | bin/druid - bin/druid_rcon - bin/druid_rcon_web_rust + bin/druid-client + bin/druid-coldstarter .docker/entrypoint.sh .docker/druid-install-command.sh @@ -57,8 +55,6 @@ jobs: name: Build env: VERSION: ${{ 
steps.version.outputs.version_tag }} - - run: make build-plugins - name: Build plugins - name: Release uses: softprops/action-gh-release@v2 with: @@ -67,8 +63,8 @@ jobs: make_latest: true files: | bin/druid - bin/druid_rcon - bin/druid_rcon_web_rust + bin/druid-client + bin/druid-coldstarter .docker/entrypoint.sh .docker/druid-install-command.sh diff --git a/.github/workflows/docker-builds.yml b/.github/workflows/docker-builds.yml index c37a4f0c..15eea8a8 100644 --- a/.github/workflows/docker-builds.yml +++ b/.github/workflows/docker-builds.yml @@ -1,4 +1,4 @@ -# Reusable workflow: all Docker image builds (base, manifests, steamcmd, nix). +# Reusable workflow: all Docker image builds (base, manifests, steamcmd). # Used by master build.yml and PR pr.yml to avoid duplication. name: Docker builds (reusable) on: @@ -60,6 +60,77 @@ jobs: DRUID_ARTIFACTS_REGISTRY_USERNAME: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_USERNAME }} DRUID_ARTIFACTS_REGISTRY_TOKEN: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_TOKEN }} + docker-coldstarter-amd64: + uses: ./.github/workflows/docker-build-reusable.yml + with: + dockerfile: Dockerfile.coldstarter + runs_on: ubuntu-latest + tags: | + highcard/druid-coldstarter:${{ inputs.version_tag }}-amd64 + artifacts.druid.gg/druid-team/druid-coldstarter:${{ inputs.version_tag }}-amd64 + build_args: | + VERSION=${{ inputs.version_tag }} + secrets: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + DRUID_ARTIFACTS_REGISTRY_USERNAME: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_USERNAME }} + DRUID_ARTIFACTS_REGISTRY_TOKEN: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_TOKEN }} + + docker-coldstarter-arm64: + uses: ./.github/workflows/docker-build-reusable.yml + with: + dockerfile: Dockerfile.coldstarter + runs_on: ubuntu-24.04-arm + tags: | + highcard/druid-coldstarter:${{ inputs.version_tag }}-arm64 + artifacts.druid.gg/druid-team/druid-coldstarter:${{ inputs.version_tag }}-arm64 + build_args: | + VERSION=${{ 
inputs.version_tag }} + secrets: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + DRUID_ARTIFACTS_REGISTRY_USERNAME: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_USERNAME }} + DRUID_ARTIFACTS_REGISTRY_TOKEN: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_TOKEN }} + + docker-coldstarter-manifest: + name: Create multi-arch coldstarter manifests + needs: [docker-coldstarter-amd64, docker-coldstarter-arm64] + runs-on: ubuntu-latest + steps: + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Create and push coldstarter manifests + uses: Noelware/docker-manifest-action@v1 + with: + inputs: highcard/druid-coldstarter:${{ inputs.version_tag }}-amd64,highcard/druid-coldstarter:${{ inputs.version_tag }}-arm64 + tags: >- + highcard/druid-coldstarter:${{ inputs.version }}, + highcard/druid-coldstarter:${{ inputs.version_tag }}${{ inputs.is_pr != true && ',highcard/druid-coldstarter:latest,highcard/druid-coldstarter:stable' || '' }} + push: true + + docker-coldstarter-manifest-artifacts: + name: Create multi-arch coldstarter manifests (artifacts registry) + needs: [docker-coldstarter-amd64, docker-coldstarter-arm64] + runs-on: ubuntu-latest + steps: + - name: Login to Artifacts Registry + uses: docker/login-action@v3 + with: + registry: artifacts.druid.gg + username: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_USERNAME }} + password: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_TOKEN }} + - name: Create and push coldstarter manifests (artifacts) + uses: Noelware/docker-manifest-action@v1 + with: + inputs: artifacts.druid.gg/druid-team/druid-coldstarter:${{ inputs.version_tag }}-amd64,artifacts.druid.gg/druid-team/druid-coldstarter:${{ inputs.version_tag }}-arm64 + tags: >- + artifacts.druid.gg/druid-team/druid-coldstarter:${{ inputs.version }}, + artifacts.druid.gg/druid-team/druid-coldstarter:${{ inputs.version_tag }}${{ inputs.is_pr != 
true && ',artifacts.druid.gg/druid-team/druid-coldstarter:latest,artifacts.druid.gg/druid-team/druid-coldstarter:stable' || '' }} + push: true + docker-base-manifest: name: Create multi-arch base manifests needs: [docker-base-amd64, docker-base-arm64] @@ -120,98 +191,3 @@ jobs: DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} DRUID_ARTIFACTS_REGISTRY_USERNAME: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_USERNAME }} DRUID_ARTIFACTS_REGISTRY_TOKEN: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_TOKEN }} - - docker-nix-amd64: - needs: [docker-base-manifest, docker-base-manifest-artifacts] - uses: ./.github/workflows/docker-build-reusable.yml - with: - dockerfile: Dockerfile.nix - runs_on: ubuntu-latest - tags: | - highcard/druid:${{ inputs.version_tag }}-nix-amd64 - artifacts.druid.gg/druid-team/druid:${{ inputs.version_tag }}-nix-amd64 - build_args: | - VERSION=${{ inputs.version_tag }} - secrets: - DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} - DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} - DRUID_ARTIFACTS_REGISTRY_USERNAME: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_USERNAME }} - DRUID_ARTIFACTS_REGISTRY_TOKEN: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_TOKEN }} - - docker-nix-arm64: - needs: [docker-base-manifest, docker-base-manifest-artifacts] - uses: ./.github/workflows/docker-build-reusable.yml - with: - dockerfile: Dockerfile.nix - runs_on: ubuntu-24.04-arm - tags: | - highcard/druid:${{ inputs.version_tag }}-nix-arm64 - artifacts.druid.gg/druid-team/druid:${{ inputs.version_tag }}-nix-arm64 - build_args: | - VERSION=${{ inputs.version_tag }} - secrets: - DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} - DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} - DRUID_ARTIFACTS_REGISTRY_USERNAME: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_USERNAME }} - DRUID_ARTIFACTS_REGISTRY_TOKEN: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_TOKEN }} - - docker-nix-steamcmd: - needs: [docker-steamcmd] - uses: ./.github/workflows/docker-build-reusable.yml - with: - dockerfile: Dockerfile.nix - tags: | - 
highcard/druid:${{ inputs.version_tag }}-nix-steamcmd - artifacts.druid.gg/druid-team/druid:${{ inputs.version_tag }}-nix-steamcmd - ${{ inputs.is_pr != true && format('highcard/druid:latest-nix-steamcmd') || '' }} - ${{ inputs.is_pr != true && format('highcard/druid:stable-nix-steamcmd') || '' }} - ${{ inputs.is_pr != true && format('highcard/druid:{0}-nix-steamcmd', inputs.version) || '' }} - ${{ inputs.is_pr != true && format('artifacts.druid.gg/druid-team/druid:latest-nix-steamcmd') || '' }} - ${{ inputs.is_pr != true && format('artifacts.druid.gg/druid-team/druid:stable-nix-steamcmd') || '' }} - ${{ inputs.is_pr != true && format('artifacts.druid.gg/druid-team/druid:{0}-nix-steamcmd', inputs.version) || '' }} - build_args: | - VERSION=${{ inputs.version_tag }}-steamcmd - secrets: - DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} - DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} - DRUID_ARTIFACTS_REGISTRY_USERNAME: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_USERNAME }} - DRUID_ARTIFACTS_REGISTRY_TOKEN: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_TOKEN }} - - docker-nix-manifest: - name: Create multi-arch nix manifests - needs: [docker-nix-amd64, docker-nix-arm64] - runs-on: ubuntu-latest - steps: - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Create and push nix manifests - uses: Noelware/docker-manifest-action@v1 - with: - inputs: highcard/druid:${{ inputs.version_tag }}-nix-amd64,highcard/druid:${{ inputs.version_tag }}-nix-arm64 - tags: >- - highcard/druid:${{ inputs.version }}-nix, - highcard/druid:${{ inputs.version_tag }}-nix${{ inputs.is_pr != true && ',highcard/druid:latest-nix,highcard/druid:stable-nix' || '' }} - push: true - - docker-nix-manifest-artifacts: - name: Create multi-arch nix manifests (artifacts registry) - needs: [docker-nix-amd64, docker-nix-arm64] - runs-on: ubuntu-latest - steps: - - name: Login to Artifacts Registry - 
uses: docker/login-action@v3 - with: - registry: artifacts.druid.gg - username: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_USERNAME }} - password: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_TOKEN }} - - name: Create and push nix manifests (artifacts) - uses: Noelware/docker-manifest-action@v1 - with: - inputs: artifacts.druid.gg/druid-team/druid:${{ inputs.version_tag }}-nix-amd64,artifacts.druid.gg/druid-team/druid:${{ inputs.version_tag }}-nix-arm64 - tags: >- - artifacts.druid.gg/druid-team/druid:${{ inputs.version }}-nix, - artifacts.druid.gg/druid-team/druid:${{ inputs.version_tag }}-nix${{ inputs.is_pr != true && ',artifacts.druid.gg/druid-team/druid:latest-nix,artifacts.druid.gg/druid-team/druid:stable-nix' || '' }} - push: true diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index 008fa8f2..80381787 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -42,15 +42,13 @@ jobs: go-version: "^1.24" - run: make build name: Build - - run: make build-plugins - name: Build Plugins - uses: actions/upload-artifact@v4 with: name: build-artifacts path: | bin/druid - bin/druid_rcon - bin/druid_rcon_web_rust + bin/druid-client + bin/druid-coldstarter .docker/entrypoint.sh .docker/druid-install-command.sh @@ -76,8 +74,8 @@ jobs: prerelease: true files: | bin/druid - bin/druid_rcon - bin/druid_rcon_web_rust + bin/druid-client + bin/druid-coldstarter .docker/entrypoint.sh .docker/druid-install-command.sh diff --git a/.gitignore b/.gitignore index ddcecc5b..ba96ce1b 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,8 @@ *.tar.gz druid** +!apps/ +!apps/** dlv.log .DS_Store @@ -15,4 +17,5 @@ druid-cli-test !.docker/** -.env \ No newline at end of file +.env +.runtime-state \ No newline at end of file diff --git a/.vscode/launch.json b/.vscode/launch.json index abd2f913..15496181 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -2,105 +2,260 @@ "version": "0.2.0", "configurations": [ { - "name": "Serve (scroll-cwd)", + "name": "druid: 
serve", "type": "go", "request": "launch", "mode": "debug", "console": "integratedTerminal", - "program": "${workspaceFolder}/main.go", + "program": "${workspaceFolder}/apps/druid", "args": [ "serve", - "--cwd", "${workspaceFolder}/examples/scroll-cwd", - "--additional-endpoints", "annotations", - "--allow-plugin-errors", - "-p", "9190" + "--socket", + "/tmp/druid-vscode-runtime.sock", + "--state-dir", + "${workspaceFolder}/.runtime-state", + "--runtime", + "${input:runtimeBackend}" ] }, { - "name": "Serve (minecraft)", + "name": "druid: validate", "type": "go", "request": "launch", "mode": "debug", "console": "integratedTerminal", - "program": "${workspaceFolder}/main.go", + "program": "${workspaceFolder}/apps/druid", "args": [ - "serve", - "--cwd", "${workspaceFolder}/examples/minecraft", - "--additional-endpoints", "annotations", - "--allow-plugin-errors", - "-p", "9190" + "validate", + "--strict", + "${workspaceFolder}/${input:scrollPath}" ] }, { - "name": "Run (scroll-cwd)", + "name": "druid: update", "type": "go", "request": "launch", "mode": "debug", "console": "integratedTerminal", - "program": "${workspaceFolder}/main.go", + "program": "${workspaceFolder}/apps/druid", + "cwd": "${workspaceFolder}/${input:scrollPath}", "args": [ - "run", - "--cwd", "${workspaceFolder}/examples/scroll-cwd", - "start" + "update", + "${input:artifactRef}", + "${workspaceFolder}/${input:scrollPath}" ] }, { - "name": "Registry Push (scroll-cwd)", + "name": "druid: app_version", "type": "go", "request": "launch", "mode": "debug", - "program": "${workspaceFolder}/main.go", + "console": "integratedTerminal", + "program": "${workspaceFolder}/apps/druid", + "cwd": "${workspaceFolder}/${input:scrollPath}", "args": [ - "registry", "push", - "--cwd", "${workspaceFolder}/examples/scroll-cwd" + "app_version" ] }, { - "name": "Registry Push (arg) ", + "name": "druid: version", "type": "go", "request": "launch", "mode": "debug", - "program": "${workspaceFolder}/main.go", + "console": 
"integratedTerminal", + "program": "${workspaceFolder}/apps/druid", "args": [ - "registry", "push","examples/scroll-cwd" + "version" ] }, { - "name": "Registry Pull (scroll-cwd-pull)", + "name": "druid-client: login", "type": "go", "request": "launch", "mode": "debug", - "program": "${workspaceFolder}/main.go", + "console": "integratedTerminal", + "program": "${workspaceFolder}/apps/druid-client", + "cwd": "${workspaceFolder}", "args": [ - "registry", "pull", - "--full", - "--cwd", "${workspaceFolder}/examples/scroll-cwd-pull", - "artifacts.druid.gg/druid-team/scroll-minecraft-forge:1.20.1-test" + "login", + "--host", + "${input:registryHost}", + "--user", + "${input:registryUser}", + "--password", + "${input:registryPassword}" ] }, { - "name": "Registry Login", + "name": "druid-client: pull", "type": "go", "request": "launch", "mode": "debug", - "program": "${workspaceFolder}/main.go", + "console": "integratedTerminal", + "program": "${workspaceFolder}/apps/druid-client", "args": [ - "registry", "login", - "--host", "artifacts.druid.gg", - "-u", "$REGISTRY_USER", - "-p", "$REGISTRY_PASS" + "pull", + "${input:artifactRef}", + "${workspaceFolder}/${input:scrollPath}" ] }, { - "name": "Remote Attach", + "name": "druid-client: push", "type": "go", - "request": "attach", - "mode": "remote", - "remotePath": "/app", + "request": "launch", + "mode": "debug", + "console": "integratedTerminal", + "program": "${workspaceFolder}/apps/druid-client", + "args": [ + "push", + "${input:artifactRef}", + "${workspaceFolder}/${input:scrollPath}" + ] + }, + { + "name": "druid-client: push category", + "type": "go", + "request": "launch", + "mode": "debug", + "console": "integratedTerminal", + "program": "${workspaceFolder}/apps/druid-client", + "args": [ + "push", + "category", + "${input:artifactRef}", + "${input:categoryName}", + "${workspaceFolder}/${input:scrollPath}" + ] + }, + { + "name": "druid-client: create", + "type": "go", + "request": "launch", + "mode": "debug", + 
"console": "integratedTerminal", + "program": "${workspaceFolder}/apps/druid-client", "cwd": "${workspaceFolder}", - "port": 2345, - "host": "127.0.0.1", - "debugAdapter": "legacy" + "args": [ + "--daemon-socket", + "/tmp/druid-vscode-runtime.sock", + "create", + "--state-dir", + "${workspaceFolder}/.runtime-state", + "${input:artifactPath}" + ] + }, + { + "name": "druid-client: register", + "type": "go", + "request": "launch", + "mode": "debug", + "console": "integratedTerminal", + "program": "${workspaceFolder}/apps/druid-client", + "args": [ + "--daemon-socket", + "/tmp/druid-vscode-runtime.sock", + "register", + "${workspaceFolder}/${input:scrollPath}" + ] + }, + { + "name": "druid-client: list", + "type": "go", + "request": "launch", + "mode": "debug", + "console": "integratedTerminal", + "program": "${workspaceFolder}/apps/druid-client", + "args": [ + "--daemon-socket", + "/tmp/druid-vscode-runtime.sock", + "list" + ] + }, + { + "name": "druid-client: describe", + "type": "go", + "request": "launch", + "mode": "debug", + "console": "integratedTerminal", + "program": "${workspaceFolder}/apps/druid-client", + "args": [ + "--daemon-socket", + "/tmp/druid-vscode-runtime.sock", + "describe", + "${input:scrollId}" + ] + }, + { + "name": "druid-client: ports", + "type": "go", + "request": "launch", + "mode": "debug", + "console": "integratedTerminal", + "program": "${workspaceFolder}/apps/druid-client", + "args": [ + "--daemon-socket", + "/tmp/druid-vscode-runtime.sock", + "ports", + "${input:scrollId}" + ] + }, + { + "name": "druid-client: run", + "type": "go", + "request": "launch", + "mode": "debug", + "console": "integratedTerminal", + "program": "${workspaceFolder}/apps/druid-client", + "args": [ + "--daemon-socket", + "/tmp/druid-vscode-runtime.sock", + "run", + "${input:scrollId}", + "${input:commandName}" + ] + }, + { + "name": "druid-client: delete", + "type": "go", + "request": "launch", + "mode": "debug", + "console": "integratedTerminal", + "program": 
"${workspaceFolder}/apps/druid-client", + "args": [ + "--daemon-socket", + "/tmp/druid-vscode-runtime.sock", + "delete", + "${input:scrollId}" + ] + }, + { + "name": "druid-client: attach", + "type": "go", + "request": "launch", + "mode": "debug", + "console": "integratedTerminal", + "program": "${workspaceFolder}/apps/druid-client", + "args": [ + "--daemon-socket", + "/tmp/druid-vscode-runtime.sock", + "attach", + "${input:scrollId}", + "${input:consoleName}" + ] + }, + { + "name": "druid-coldstarter: run", + "type": "go", + "request": "launch", + "mode": "debug", + "console": "integratedTerminal", + "program": "${workspaceFolder}/apps/druid-coldstarter", + "args": [ + "--runtime-config", + "${workspaceFolder}/${input:scrollPath}/data/.druid/runtime.json", + "--status-file", + "coldstart/status.json" + ] }, { "name": "Test Current File", @@ -110,6 +265,94 @@ "program": "${file}", "args": [], "showLog": true + }, + { + "name": "Remote Attach", + "type": "go", + "request": "attach", + "mode": "remote", + "remotePath": "/app", + "cwd": "${workspaceFolder}", + "port": 2345, + "host": "127.0.0.1", + "debugAdapter": "legacy" + } + ], + "inputs": [ + { + "id": "runtimeBackend", + "type": "pickString", + "description": "Runtime backend", + "options": [ + "docker" + ], + "default": "docker" + }, + { + "id": "scrollPath", + "type": "pickString", + "description": "Scroll directory", + "options": [ + "examples/jobs", + "examples/static-web", + "examples/minecraft", + "examples/mysql" + ], + "default": "examples/jobs" + }, + { + "id": "artifactPath", + "type": "promptString", + "description": "Artifact reference or local scroll path", + "default": "examples/jobs" + }, + { + "id": "artifactRef", + "type": "promptString", + "description": "Artifact reference", + "default": "ghcr.io/druid-examples/jobs:1.0" + }, + { + "id": "scrollId", + "type": "promptString", + "description": "Runtime scroll id", + "default": "jobs" + }, + { + "id": "commandName", + "type": "promptString", + 
"description": "Command name", + "default": "report" + }, + { + "id": "consoleName", + "type": "promptString", + "description": "Console name", + "default": "report" + }, + { + "id": "categoryName", + "type": "promptString", + "description": "Registry category name", + "default": "docs" + }, + { + "id": "registryHost", + "type": "promptString", + "description": "Registry host", + "default": "ghcr.io" + }, + { + "id": "registryUser", + "type": "promptString", + "description": "Registry username", + "default": "" + }, + { + "id": "registryPassword", + "type": "promptString", + "description": "Registry password/token", + "default": "" } ] } diff --git a/CONTEXT.md b/CONTEXT.md new file mode 100644 index 00000000..1253acb1 --- /dev/null +++ b/CONTEXT.md @@ -0,0 +1,340 @@ +# Druid CLI Refactor Context + +## Repo State + +- Repo: `/Users/marcschottstadt/Development/druid/druid-cli` +- Branch/worktree is very dirty from an active large refactor. +- Do not assume deleted files are accidental. Many legacy files were intentionally removed. +- Go toolchain used for verification: `GOTOOLCHAIN=go1.24.7` +- `make build` succeeds but still emits the existing OpenAPI 3.1 warning from `oapi-codegen`. + +## Core Direction + +- Split app binaries: + - `apps/druid`: daemon + local OCI/validation tooling + - `apps/druid-client`: daemon client CLI + - `apps/druid-coldstarter`: standalone coldstarter +- Docker runtime is the local backend. Kubernetes runtime support works in-cluster or out-of-cluster with kubeconfig and lives under `internal/runtime/kubernetes`. +- Runtime concept is always `scrolls`; avoid `instances` terminology. +- Daemon is runtime control plane, not OCI artifact manager. +- CLI owns OCI actions: pull, push, login. +- Daemon should consume already-materialized scroll paths. +- Containers must not see daemon-owned scroll spec files like `scroll.yaml`. +- Containers only see explicit mounts sourced from runtime `data/`. 
+ +## Current Command Surface + +`druid` now exposes daemon/local validation tooling only: + +```text +druid serve +druid update [artifact] [dir] +druid validate [dir] +druid app_version +``` + +Removed from `druid`: + +```text +druid pull [dir] +druid push [artifact] +druid push category ... +druid login --host -u -p +druid create +``` + +Runtime daemon and OCI interaction is through `druid-client`: + +```text +druid-client login --host -u -p +druid-client pull [dir] +druid-client push [artifact] [dir] +druid-client push category ... +druid-client create [name] +druid-client register [dir] [name] +``` + +## OCI Ownership + +- Flattened OCI commands now live on `druid-client`: + - old `druid registry pull` -> `druid-client pull [dir]` + - old `druid registry push` -> `druid-client push [artifact] [dir]` + - old `druid registry login` -> `druid-client login ...` +- `druid-client pull` keeps current behavior: + - pulls into optional dir or current working directory + - includes data by default + - `--no-data` skips data files +- `druid-client create` first asks the daemon to materialize: + - Kubernetes daemon creates PVCs and runs a `druid-client pull` Job in-cluster. + - Docker daemon returns materialization unsupported, then client falls back to local materialization into `state/scrolls//spec` and `state/data//data`. + - explicit `--scroll-root`/`--data-root` still materializes directly into those daemon-visible paths. +- `druid-client register [dir] [name]` reports an already checked-out scroll directory without OCI checkout/copying. +- Kubernetes create path: daemon/controller creates PVCs, runs a `druid-client pull` Job, stores runtime scroll state in ConfigMaps, stores opaque `k8s://namespace/pvc` refs there, and runs procedures as Kubernetes Jobs or StatefulSets depending on run mode. +- Docker runtime state stays in local SQLite; Kubernetes runtime state must recover from ConfigMaps, not `state.db`. 
+- Kubernetes daemon auth prefers in-cluster config, then kubeconfig from `--k8s-kubeconfig`, `DRUID_K8S_KUBECONFIG`, `KUBECONFIG`, or `~/.kube/config`. + +## Daemon/API + +- `druid serve` starts the multi-scroll runtime daemon. +- Daemon listens on a Unix socket. +- OpenAPI is the REST route source. +- REST routes are registered via generated `api.RegisterHandlersWithOptions(...)`. +- Manual REST path registration was removed. +- `/health` is intentionally kept as a manual liveness alias. +- Generated `/api/v1/health` also exists. +- WebSocket attach remains manual: + - `/ws/v1/scrolls/:id/consoles/:console` + +Active OpenAPI REST endpoints now only cover: + +```text +GET /api/v1/health +GET /api/v1/scrolls +POST /api/v1/scrolls +GET /api/v1/scrolls/{id} +DELETE /api/v1/scrolls/{id} +POST /api/v1/scrolls/{id}/commands/{command} +GET /api/v1/scrolls/{id}/ports +``` + +Legacy REST endpoints were removed from OpenAPI and code: + +```text +/api/v1/command +/api/v1/procedure +/api/v1/procedures +/api/v1/logs +/api/v1/metrics +/api/v1/pstree +/api/v1/processes +/api/v1/queue +/api/v1/token +/api/v1/consoles +/api/v1/watch/* +/api/v1/daemon/stop +``` + +## Handler Layout + +- HTTP handlers now live under `apps/druid/adapters/http/handlers`. +- Removed legacy `internal/handler` package and tests. +- Removed `apps/druid/adapters/http/server` and `apps/druid/adapters/http/middlewares`. +- Removed `internal/core/ports/handler_ports.go`. +- `apps/druid/adapters/cli/serve.go` is intentionally thin: + - flags + - dependency construction + - route registration + - socket/TCP listener startup + +## Runtime State Layout + +Runtime state root defaults to `~/.druid/runtime`, or `--state-dir`. 
+ +Paths: + +```text +/scrolls//spec # daemon-owned scroll spec root; contains scroll.yaml +/data//data # runtime data directory; mounted into containers by explicit mounts +``` + +Domain: + +- `RuntimeScroll.ScrollRoot`: daemon-owned spec root +- `RuntimeScroll.DataRoot`: runtime data root parent +- Runtime config generated at `/data/.druid/runtime.json` + +SQLite store: + +- `internal/core/services/runtime_state_store.go` +- Table: `scrolls` +- `data_root` migration exists. + +## Runtime Mount Model + +- Removed implicit `/app/resources/deployment` mount. +- Removed `domain.ScrollMountPath`. +- Procedure mounts are explicit: + +```yaml +mounts: + - path: /server + sub_path: minecraft + read_only: false +``` + +- `sub_path` is optional and relative to runtime `data/`. +- Missing `sub_path` means mount whole `data/`. +- `read_only` is supported. +- Mount validation checks: + - path required + - path absolute + - duplicate mount paths invalid + - sub_path relative + - sub_path cannot escape via `..` + +Docker implementation maps: + +```text +/data/ -> +``` + +## Runtime Config + +- Generated by daemon before running commands. +- Location: `/data/.druid/runtime.json` +- Includes: + - scroll id/name/artifact + - runtime backend and generated time + - top-level ports + - expected ports by procedure +- Coldstarter now supports `--runtime-config` and should prefer it over reading `scroll.yaml`. + +## Procedure Runtime Model + +- Executable runtime fields live on procedures, not commands: + - `type` (`container` default, `signal` explicit) + - `image` + - `command` + - `working_dir` + - `env` + - `mounts` + - `target`/`signal` for signal procedures + - `tty` + - `expectedPorts` +- Commands remain orchestration groups: + - `procedures` + - `needs` + - `run` +- `ProcedureLauncher` no longer owns an OCI registry client. +- Legacy `mode`, `wait`, and `data` procedures are rejected during validation. 
+ +## Expected Ports And Traffic + +- Top-level `ports` define named port metadata. +- Procedure `expectedPorts` references top-level port names. +- `keepAliveTraffic` belongs under `expectedPorts`. +- Examples: + - `10kb/5m` + - `10b/1s` +- Docker backend binds expected ports by resolving named top-level ports. +- Docker traffic is container-level only; same RX/TX stats are copied to every expected port for that procedure/container. +- Port status API: + - `GET /api/v1/scrolls/{id}/ports` + +## InitScroll And Templates + +- `InitScroll` was removed fully. +- `.scroll_template` rendering support was removed fully. +- `scroll-config.yml` and `scroll-config.yml.scroll_template` artifact support was removed. +- Sprig dependency was removed. +- Rationale: stay lean and avoid unclear daemon/data/Kubernetes semantics. Reintroduce explicit pattern later if needed, probably via external tool like `gomplate` or backend-specific init Job. + +## Removed Legacy Areas + +- Removed Nix/dependency-resolution support. +- Removed local runtime backend. +- Removed old single-scroll serve mode. +- Removed `druid daemon`, `druid runtime`, `druid runtime serve`, `druid stop`. +- Removed old port monitor/watch-port flow. +- Removed plugin system files. +- Removed legacy handlers/server/middlewares. + +## Key Files + +- `apps/druid/adapters/cli/root.go`: current `druid` command surface. +- `apps/druid/adapters/cli/serve.go`: daemon startup/listener setup. +- `apps/druid/adapters/http/handlers/routes.go`: generated REST registration plus manual `/health` and websocket route. +- `apps/druid/adapters/http/handlers/scroll_handler.go`: generated OpenAPI server handler methods for runtime scrolls. +- `apps/druid/core/services/runtime_controller.go`: run command, write runtime config, port status. +- `apps/druid-client/adapters/cli/create.go`: local materialization then daemon registration. +- `apps/druid-client/adapters/cli/pull.go`: client-owned OCI pull. 
+- `apps/druid-client/adapters/cli/push.go`: client-owned OCI push. +- `apps/druid-client/adapters/cli/login.go`: client-owned registry login. +- `apps/druid-client/adapters/daemon/openapi_client.go`: generated OpenAPI client adapter. +- `internal/core/services/runtime_scroll_manager.go`: `RuntimeScrollManager` and `MaterializeScrollArtifact`. +- `internal/core/services/runtime_state_store.go`: SQLite state store. +- `internal/runtime/docker/backend.go`: Docker runtime backend. +- `api/openapi.yaml`: OpenAPI source of truth for REST API. +- `internal/api/generated.go`: generated OpenAPI code. +- `examples/{jobs,static-web,mysql,minecraft}/scroll.yaml`: current examples. + +## Verification Already Run + +These commands passed after the latest changes: + +```sh +GOTOOLCHAIN=go1.24.7 make generate-api +GOTOOLCHAIN=go1.24.7 make mock +GOTOOLCHAIN=go1.24.7 go test ./... +./scripts/validate_all_scrolls.sh +GOTOOLCHAIN=go1.24.7 make build +jq empty .vscode/launch.json +``` + +Also passed local smoke: + +```text +./bin/druid serve --socket /runtime.sock --state-dir /state +./bin/druid-client --daemon-socket /runtime.sock create smoke examples/static-web --state-dir /state +verified: + /scrolls/smoke/spec/scroll.yaml exists + /data/smoke/data exists + /data/smoke/data/scroll.yaml does not exist + druid-client describe smoke works +``` + +## Known Warning + +`make build` and `make generate-api` emit: + +```text +WARNING: You are using an OpenAPI 3.1.x specification, which is not yet supported by oapi-codegen... +``` + +This is known and currently non-blocking. + +## Important Follow-Ups + +- DB-first daemon resume is still conceptual, not implemented: + - daemon startup does not yet restore runners/sessions from `RuntimeScroll.Status` and `RuntimeScroll.Commands`. + - `RunRuntimeScrollCommand` still creates queue machinery per command invocation. + - Need a daemon-owned per-scroll session/controller eventually. 
+- DB command statuses are not yet persisted on every queue transition. +- `scroll-lock.json` still exists in services and queue behavior; DB should become authoritative later. +- `runtime_instance_manager.go` filename still says instance; consider renaming to match `RuntimeScrollManager`. +- `druid-client create` local materialization assumes shared filesystem with daemon unless explicit `--scroll-root` and `--data-root` are passed. +- Kubernetes design still needs proper backend refs instead of local filesystem paths. +- Docs generated under `docs_md` are stale/incomplete after command flattening; deleted stale registry/runtime command pages but did not regenerate docs. + +## Current Mental Model + +Docker/local create: + +```text +druid serve --runtime docker +druid-client create [name] + -> client materializes OCI/local artifact into runtime state + -> client POSTs generated OpenAPI CreateScrollRequest with scroll_root/data_root + -> daemon reads scroll.yaml through its configured runtime backend + -> daemon caches scroll.yaml in SQLite + +druid-client register [dir] [name] + -> client reports already checked-out dir + -> daemon reads scroll.yaml through its configured runtime backend + -> daemon caches scroll.yaml in SQLite + +druid-client run + -> daemon writes runtime config + -> daemon launches Docker procedure containers using explicit data mounts +``` + +Runtime is daemon-only: `druid-client create/register/list/describe` do not send, store, or display a per-scroll runtime. 
+ +Future Kubernetes create: + +```text +controller/daemon creates PVC/spec volume +Kubernetes Job runs: druid-client pull [mounted-dir] +daemon/controller registers materialized scroll +backend creates Jobs/Deployments/StatefulSets from scroll procedures +``` diff --git a/Dockerfile b/Dockerfile index 0a8fb808..2df1cffd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,7 +10,6 @@ WORKDIR /go ENV VERSION=${VERSION} RUN make build -RUN make build-plugins # The binaries are in ./bin/ directory after build @@ -44,4 +43,4 @@ RUN useradd -m -u $UID -g $GID -o -s /bin/bash druid USER druid -ENTRYPOINT [ "/entrypoint.sh" ] \ No newline at end of file +ENTRYPOINT [ "/entrypoint.sh" ] diff --git a/Dockerfile.coldstarter b/Dockerfile.coldstarter new file mode 100644 index 00000000..a9cdbf74 --- /dev/null +++ b/Dockerfile.coldstarter @@ -0,0 +1,10 @@ +FROM golang:bullseye AS builder + +ARG VERSION=docker +WORKDIR /src +COPY . . +RUN CGO_ENABLED=0 go build -ldflags "-X github.com/highcard-dev/daemon/internal.Version=${VERSION}" -o /out/druid-coldstarter ./apps/druid-coldstarter + +FROM gcr.io/distroless/static-debian12:nonroot +COPY --from=builder /out/druid-coldstarter /usr/bin/druid-coldstarter +ENTRYPOINT ["/usr/bin/druid-coldstarter"] diff --git a/Dockerfile.nix b/Dockerfile.nix deleted file mode 100644 index e69fbf76..00000000 --- a/Dockerfile.nix +++ /dev/null @@ -1,15 +0,0 @@ -ARG VERSION=latest -FROM highcard/druid:${VERSION} -USER root - -RUN apt-get update && apt-get install -y \ - curl xz-utils \ - && rm -rf /var/lib/apt/lists/* -RUN mkdir -m 0755 /nix && chown druid /nix -USER druid - -# install Nix package manager -RUN bash -c "sh <(curl --proto '=https' --tlsv1.2 -L https://nixos.org/nix/install) --no-daemon" -# Make nix available in PATH for all RUN commands -ENV PATH=/home/druid/.nix-profile/bin:/home/druid/.nix-profile/sbin:$PATH -ENV NIX_PATH="nixpkgs=~/.nix-defexpr/channels/nixpkgs" \ No newline at end of file diff --git a/Makefile b/Makefile index 
4eb92e98..f297bcc7 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,7 @@ -.PHONY: test build +.PHONY: test build build-coldstarter-image VERSION ?= "dev" +COLDSTARTER_IMAGE ?= druid-coldstarter:local generate-api: ## Generate API types from OpenAPI spec @echo "Generating API types from OpenAPI spec..." @@ -13,8 +14,13 @@ validate-api: ## Validate OpenAPI spec @PATH="$(shell go env GOPATH)/bin:$$PATH" oapi-codegen -config api/oapi-codegen.yaml api/openapi.yaml > /dev/null @echo "✓ OpenAPI spec is valid" -build: generate-api ## Build Daemon - CGO_ENABLED=0 go build -ldflags "-X github.com/highcard-dev/daemon/internal.Version=$(VERSION)" -o ./bin/druid +build: generate-api ## Build Daemon and helper binaries + CGO_ENABLED=0 go build -ldflags "-X github.com/highcard-dev/daemon/internal.Version=$(VERSION)" -o ./bin/druid ./apps/druid + CGO_ENABLED=0 go build -ldflags "-X github.com/highcard-dev/daemon/internal.Version=$(VERSION)" -o ./bin/druid-client ./apps/druid-client + CGO_ENABLED=0 go build -ldflags "-X github.com/highcard-dev/daemon/internal.Version=$(VERSION)" -o ./bin/druid-coldstarter ./apps/druid-coldstarter + +build-coldstarter-image: ## Build local druid-coldstarter Docker image without pushing + VERSION=$(VERSION) IMAGE=$(COLDSTARTER_IMAGE) ./scripts/build_coldstarter_image.sh build-x86-docker: docker run -e GOOS=linux -e GOARCH=amd64 -it --rm -v ./:/app -w /app --entrypoint=/bin/bash docker.elastic.co/beats-dev/golang-crossbuild:1.22.5-main -c 'CGO_ENABLED=1 go build -ldflags "-X github.com/highcard-dev/daemon/internal.Version=$(VERSION)" -o ./bin/x86/druid' @@ -22,19 +28,11 @@ build-x86-docker: install: ## Install Daemon cp ./bin/druid /usr/local/bin/druid -build-plugins: ## Build Plugins - CGO_ENABLED=0 go build -o ./bin/druid_rcon ./plugin/rcon/rcon.go - CGO_ENABLED=0 go build -o ./bin/druid_rcon_web_rust ./plugin/rcon_web_rust/rcon_web_rust.go - -proto: - protoc --go_out=paths=source_relative:./ --go-grpc_out=paths=source_relative:./ 
--go-grpc_opt=paths=source_relative plugin/proto/*.proto - - generate-md-docs: go run ./docs_md/main.go run: ## Run Daemon - go run main.go + go run ./apps/druid mock: mockgen -source=internal/core/ports/services_ports.go -destination test/mock/services.go @@ -61,4 +59,4 @@ test-integration-docker: test-integration-docker-debug: docker build . -f Dockerfile.testing -t druid-cli-test - docker run -v ./:/app --entrypoint=/bin/bash --rm -p 2345:2345 -it druid-cli-test -c "dlv --listen=:2345 --headless=true --log=true --log-output=debugger,debuglineerr,gdbwire,lldbout,rpc --accept-multiclient --api-version=2 test ./test/integration/commands" \ No newline at end of file + docker run -v ./:/app --entrypoint=/bin/bash --rm -p 2345:2345 -it druid-cli-test -c "dlv --listen=:2345 --headless=true --log=true --log-output=debugger,debuglineerr,gdbwire,lldbout,rpc --accept-multiclient --api-version=2 test ./test/integration/commands" diff --git a/README.md b/README.md index 6e461063..ec8a590d 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,10 @@ # Druid CLI -This CLI is a process management tool. -It gives users the ability to launch and observe long running processes. +This repository contains the Druid runtime tools for packaging Scrolls as OCI artifacts, serving the local runtime daemon, and controlling daemon-managed Scrolls. A good use case is to let it run inside of a docker container. It will give additional insights and management abilities. -This CLI is currently deployed within every deployment at [druid.gg](https://druid.gg). +The current runtime backends are Docker for local development and Kubernetes for in-cluster or kubeconfig-backed cluster operation. ## Installation @@ -21,29 +20,56 @@ Also consider our installation documentation: [https://docs.druid.gg/cli/introdu ## Scroll OCI manifest -The Druid CLI uses a **so called Scroll** to get instructions on how to launch and handle the process. 
+The Druid CLI uses a **so-called Scroll** to describe container-backed commands. A scroll can also include files. A Scroll is an OCI Artifact, so it is easy to distribute with registries like Dockerhub. ## Features -### Dependency based process runner +### Binaries -The way processes are handled is described in the `scroll.yaml` and is similar to, how Github Actions work, just with the ability to run indefinetly. -Processes can also depend on each other. +This repository builds three isolated binaries: + +- `apps/druid` -> `bin/druid`: daemon plus local validation/update tooling. +- `apps/druid-client` -> `bin/druid-client`: client-only CLI for daemon API and OCI commands. +- `apps/druid-coldstarter` -> `bin/druid-coldstarter`: standalone coldstart gate binary/image. + +Build all binaries with: + +```bash +make build +``` + +Common local flow: + +```bash +druid serve --runtime docker +druid-client login --host -u -p +druid-client pull [dir] +druid-client push [artifact] [dir] +druid-client create [name] +druid-client register [dir] [name] +druid-client run +druid-client describe +``` + +For examples, omit `[name]` so each scroll derives its own id from `scroll.yaml`. + +### Dependency based command runner + +The way commands are handled is described in the `scroll.yaml` and is similar to how GitHub Actions work, with support for long-running container commands. +Commands can also depend on each other. ### Web Server -The is a web server included, easily have remote control over the process. +There is a web server included, so you can control daemon-managed containers remotely. There is also websocket support for stdout. TTY is also supported. -### Plugin support - -There is the ability to extend the druid CLI with Plugins based on [Go-Plugins](https://github.com/hashicorp/go-plugin). 
+### Runtime backend -Example Plugins: +Runtime selection is daemon-only: start the daemon with `druid serve --runtime docker`, then use `druid-client` to create, register, run, and inspect scrolls without passing a runtime. Docker runtime state stays in SQLite under the runtime state directory. Scroll specs and runtime data are materialized separately so containers only receive explicit mounts from runtime `data/`. -https://github.com/highcard-dev/druid-cli/tree/master/plugin +Kubernetes runtime support is available with `druid serve --runtime kubernetes` for in-cluster daemons or out-of-cluster daemons using kubeconfig. It stores daemon scroll state in ConfigMaps, materializes OCI artifacts through cluster Jobs, and uses Cilium/Hubble Relay for port traffic presence. See `docs/kubernetes_runtime.md` for kubeconfig, RBAC, PVC, and Hubble setup. ## Documentation diff --git a/api/oapi-codegen.yaml b/api/oapi-codegen.yaml index 3d4cf9c5..58c1eb26 100644 --- a/api/oapi-codegen.yaml +++ b/api/oapi-codegen.yaml @@ -1,6 +1,7 @@ package: api output: internal/api/generated.go generate: + client: true fiber-server: true models: true embedded-spec: true diff --git a/api/openapi.yaml b/api/openapi.yaml index f7688c01..d253a29d 100644 --- a/api/openapi.yaml +++ b/api/openapi.yaml @@ -3,8 +3,8 @@ info: title: Druid CLI version: 0.1.0 description: | - Druid CLI is a process runner that launches and manages various sorts of - applications, like gameservers, databases or webservers. + Druid CLI launches and manages container-backed scroll applications, like + game servers, databases, and web servers. 
contact: {} servers: @@ -16,10 +16,6 @@ tags: description: Scroll management and command execution - name: logs description: Log streaming and retrieval - - name: metrics - description: Process metrics and monitoring - - name: process - description: Process management - name: queue description: Command queue management - name: websocket @@ -28,8 +24,8 @@ tags: description: Port information and status - name: health description: Health checks and status - - name: coldstarter - description: Cold start operations + - name: runtime + description: Multi-scroll runtime management - name: daemon description: Daemon lifecycle management - name: watch @@ -52,117 +48,6 @@ components: description: Short-lived query token for WebSocket connections schemas: - # Request Types - StartCommandRequest: - type: object - required: - - command - properties: - command: - type: string - description: The command ID to execute - example: "start" - sync: - type: boolean - default: false - description: Whether to run synchronously (wait for completion) - - AddPortRequest: - type: object - required: - - port - - protocol - - name - properties: - port: - type: integer - description: Port number (1-65535) - minimum: 1 - maximum: 65535 - example: 8080 - protocol: - type: string - description: Network protocol (tcp or udp) - enum: [tcp, udp] - example: "tcp" - name: - type: string - description: Port name/identifier - example: "my-service" - mandatory: - type: boolean - default: false - description: Whether this port must be open for health check - check_activity: - type: boolean - default: false - description: Whether to monitor port activity - description: - type: string - description: Optional port description - - StartProcedureRequest: - type: object - required: - - mode - - data - - process - properties: - mode: - type: string - description: The procedure mode (e.g., "stdin", or plugin mode) - example: "stdin" - data: - type: string - description: The data payload for the procedure - 
process: - type: string - description: The process name to run the procedure against - dependencies: - type: array - items: - type: string - description: List of dependency IDs this procedure depends on - sync: - type: boolean - default: false - description: Whether to run synchronously - - WatchModeRequest: - type: object - required: [watchPaths] - properties: - watchPaths: - type: array - items: - type: string - description: Directories to watch - hotReloadCommands: - type: array - items: - type: string - description: Commands to run when files change - - # Response Types - TokenResponse: - type: object - required: - - token - properties: - token: - type: string - description: The generated authentication token - - ConsolesResponse: - type: object - required: - - consoles - properties: - consoles: - type: object - additionalProperties: - $ref: '#/components/schemas/Console' - HealthResponse: type: object required: @@ -184,758 +69,256 @@ components: nullable: true description: When the daemon started - WatchStatusResponse: - type: object - required: - - enabled - - watchedPaths - properties: - enabled: - type: boolean - description: Whether watch mode is currently enabled - watchedPaths: - type: array - items: - type: string - description: List of currently watched file paths - - WatchModeResponse: + CreateScrollRequest: type: object required: - - status - - enabled + - artifact properties: - status: + id: type: string - description: Result status of the operation - example: "success" - enabled: - type: boolean - description: Current watch mode state - - ErrorResponse: + description: Deprecated alias for name. Optional local runtime scroll id/name. + example: jobs + name: + type: string + description: Optional local runtime scroll id/name. If omitted, the daemon derives it from scroll.yaml name. 
+ example: jobs + artifact: + type: string + description: OCI artifact reference or local scroll path + example: artifacts.druid.gg/test/test:test + scroll_root: + type: string + description: Optional daemon-local path or backend ref containing scroll.yaml and scroll spec files. If omitted, a materializing runtime backend may pull the artifact. + data_root: + type: string + description: Optional daemon-local path or backend ref containing runtime data directory. If omitted, a materializing runtime backend may pull the artifact. + RuntimeScroll: type: object required: + - id + - artifact + - scroll_root + - data_root + - scroll_name - status - - error + - created_at + - updated_at properties: - status: + id: type: string - example: "error" - error: + owner_id: type: string - description: Error message - - ScrollLogStream: - type: object - required: - - key - - log - properties: - key: + artifact: type: string - description: The log stream identifier - log: - type: array - items: - type: string - description: Array of log lines - - ProcessesResponse: - type: object - required: - - processes - properties: - processes: - type: object - additionalProperties: - $ref: '#/components/schemas/Process' - - # Domain Types - ScrollFile: - type: object - description: Scroll configuration file structure - properties: - name: + scroll_root: type: string - description: Scroll name - desc: + data_root: type: string - description: Scroll description - version: + scroll_name: type: string - description: Scroll version (semver) - example: "1.0.0" - app_version: + status: type: string - description: Application version (not necessarily semver) - init: + enum: [created, running, stopped, error, deleted] + created_at: type: string - description: Initialization command (deprecated, use serve) - deprecated: true - serve: + format: date-time + updated_at: type: string - description: Serve command - ports: - type: array - items: - $ref: '#/components/schemas/Port' - keepAlivePPM: - type: 
integer - description: Keep alive packets per minute + format: date-time commands: type: object - additionalProperties: - $ref: '#/components/schemas/CommandInstructionSet' - plugins: - type: object - additionalProperties: - type: object - additionalProperties: - type: string - cronjobs: - type: array - items: - $ref: '#/components/schemas/Cronjob' + additionalProperties: true - Port: + DeletedScroll: type: object required: - - port - - protocol - - name + - id + - status properties: - port: - type: integer - description: Port number - example: 8080 - protocol: - type: string - description: Network protocol - example: "tcp" - name: - type: string - description: Port name/identifier - sleep_handler: - type: string - nullable: true - description: Handler to call when port becomes inactive - mandatory: - type: boolean - description: Whether this port must be open for health check - vars: - type: array - items: - $ref: '#/components/schemas/ColdStarterVars' - start_delay: - type: integer - description: Delay in seconds before starting port check - finish_after_command: + id: type: string - description: Command to run after port is available - check_activity: - type: boolean - description: Whether to monitor port activity - description: + status: type: string - description: Port description + example: deleted - AugmentedPort: + RuntimePortStatus: type: object required: + - name + - procedure - port - protocol - - name - - inactive_since - - inactive_since_sec - - open + - bound + - traffic + - source properties: - port: - type: integer - description: Port number - protocol: - type: string - description: Network protocol name: type: string - description: Port name/identifier - sleep_handler: + procedure: type: string - nullable: true - mandatory: - type: boolean - vars: - type: array - items: - $ref: '#/components/schemas/ColdStarterVars' - start_delay: + port: type: integer - finish_after_command: + protocol: type: string - check_activity: + bound: type: boolean - 
description: + host_ip: type: string - inactive_since: - type: string - format: date-time - description: When the port became inactive - inactive_since_sec: + host_port: type: integer - description: Seconds since port became inactive - open: + traffic: type: boolean - description: Whether the port is currently open - - Console: - type: object - required: - - type - - inputMode - properties: - type: - type: string - enum: [tty, process, plugin] - description: Console type - inputMode: - type: string - description: Input mode for the console - exit: - type: integer - nullable: true - description: Exit code if console has exited - - Process: - type: object - required: - - name - - type - properties: - name: - type: string - description: Process name/identifier - type: - type: string - description: Process type - - ProcessTreeRoot: - type: object - required: - - root - - total_memory_rss - - total_memory_vms - - total_memory_swap - - total_io_counters_read - - total_io_counters_write - - total_cpu_percent - - total_process_count - properties: - root: - $ref: '#/components/schemas/ProcessTreeNode' - total_memory_rss: - type: integer - format: int64 - total_memory_vms: + traffic_bytes: type: integer format: int64 - total_memory_swap: + rx_bytes: type: integer format: int64 - total_io_counters_read: + tx_bytes: type: integer format: int64 - total_io_counters_write: - type: integer - format: int64 - total_cpu_percent: - type: number - format: double - total_process_count: - type: integer - - ProcessTreeNode: - type: object - properties: - process: - type: string - description: Process information (simplified from gopsutil) - memory: - type: string - description: Memory statistics - memory_ex: + keepAliveTraffic: type: string - description: Extended memory statistics - io_counters: - type: string - description: I/O counters - cpu_percent: - type: number - format: double - name: + traffic_window: type: string - gids: - type: array - items: - type: integer - username: - type: 
string - cmdline: - type: string - children: - type: array - items: - $ref: '#/components/schemas/ProcessTreeNode' - - ProcessMonitorMetrics: - type: object - required: - - cpu - - memory - - connections - - pid - properties: - cpu: - type: number - format: double - description: CPU usage percentage - memory: - type: integer - description: Memory usage in bytes - connections: - type: array - items: - type: string - description: Active network connections - pid: - type: integer - description: Process ID - - CommandInstructionSet: - type: object - required: - - procedures - properties: - dependencies: - type: array - items: - type: string - procedures: - type: array - items: - $ref: '#/components/schemas/Procedure' - needs: - type: array - items: - type: string - run: - type: string - enum: [always, once, restart, persistent] - description: Run mode for the command - - Procedure: - type: object - required: - - mode - properties: - mode: - type: string - description: Procedure execution mode - id: - type: string - nullable: true - description: Unique procedure identifier - wait: - oneOf: - - type: string - - type: integer - - type: boolean - description: Wait condition - data: - description: Procedure data payload - ignore_failure: + traffic_ok: type: boolean - description: Whether to continue on failure - - Cronjob: - type: object - required: - - name - - schedule - - command - properties: - name: + last_activity_at: type: string - schedule: - type: string - description: Cron schedule expression - example: "0 * * * *" - command: - type: string - - ColdStarterVars: - type: object - required: - - name - - value - properties: - name: - type: string - value: + format: date-time + source: type: string - ScrollLockStatus: - type: string - enum: - - running - - done - - error - - waiting - description: Status of a command in the queue - - QueueResponse: - type: object - description: Map of command IDs to their execution status - additionalProperties: - $ref: 
'#/components/schemas/ScrollLockStatus' - paths: - # Scroll Endpoints - /api/v1/scroll: + # Runtime Scroll Endpoints + /api/v1/scrolls: get: - operationId: getScroll - summary: Get current scroll - description: Returns the currently loaded scroll configuration - tags: [scroll, daemon] - security: - - bearerAuth: [] + operationId: listScrolls + summary: List runtime scrolls + tags: [runtime, daemon] responses: '200': - description: Current scroll file + description: Runtime scrolls content: application/json: schema: - $ref: '#/components/schemas/ScrollFile' - /api/v1/scroll/commands/{command}: - put: - operationId: addCommand - summary: Add command to current scroll - description: Adds a tempoary command to current scroll, useful to add temporary functionality (e.g. used for developer mode at druid.gg) - tags: [scroll, daemon] - security: - - bearerAuth: [] - parameters: - - name: command - in: path - required: true - schema: - type: string - description: Command Name - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CommandInstructionSet' - responses: - '201': - description: Command created - /api/v1/command: - post: - operationId: runCommand - summary: Run a command - description: Execute a command from the scroll configuration - tags: [scroll, daemon] - security: - - bearerAuth: [] - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/StartCommandRequest' - responses: - '200': - description: Command completed synchronously - '201': - description: Command started asynchronously - '400': - description: Bad request - '500': - description: Internal server error - - /api/v1/procedure: + type: array + items: + $ref: '#/components/schemas/RuntimeScroll' post: - operationId: runProcedure - summary: Run a procedure - description: Execute a standalone procedure - tags: [scroll, daemon] - security: - - bearerAuth: [] + operationId: createScroll + summary: Create runtime 
scroll + tags: [runtime, daemon] requestBody: required: true content: application/json: schema: - $ref: '#/components/schemas/StartProcedureRequest' + $ref: '#/components/schemas/CreateScrollRequest' responses: - '200': - description: Procedure completed synchronously - content: - application/json: - schema: - type: object '201': - description: Procedure started asynchronously - '400': - description: Bad request - - /api/v1/procedures: - get: - operationId: getProcedures - summary: Get procedure statuses - description: Get the status of all running procedures - tags: [scroll, process, daemon] - security: - - bearerAuth: [] - responses: - '200': - description: Map of procedure statuses - content: - application/json: - schema: - type: object - additionalProperties: - type: object - - # Log Endpoints - /api/v1/logs: - get: - operationId: listAllLogs - summary: List all log streams - description: Get all available log streams with their content - tags: [logs, daemon] - security: - - bearerAuth: [] - responses: - '200': - description: Array of log streams + description: Runtime scroll created content: application/json: schema: - type: array - items: - $ref: '#/components/schemas/ScrollLogStream' + $ref: '#/components/schemas/RuntimeScroll' - /api/v1/logs/{stream}: + /api/v1/scrolls/{id}: get: - operationId: listStreamLogs - summary: List logs for a specific stream - description: Get logs for a specific stream identifier - tags: [logs, daemon] - security: - - bearerAuth: [] + operationId: getScroll + summary: Get runtime scroll + tags: [runtime, daemon] parameters: - - name: stream + - name: id in: path required: true schema: type: string - description: Stream identifier responses: '200': - description: Log stream content + description: Runtime scroll content: application/json: schema: - $ref: '#/components/schemas/ScrollLogStream' + $ref: '#/components/schemas/RuntimeScroll' '404': - description: Stream not found - - # Metrics Endpoints - /api/v1/metrics: - get: - 
operationId: getMetrics - summary: Get process metrics - description: Get metrics for all monitored processes - tags: [metrics, daemon] - security: - - bearerAuth: [] - responses: - '200': - description: Process metrics map - content: - application/json: - schema: - type: object - additionalProperties: - $ref: '#/components/schemas/ProcessMonitorMetrics' - - /api/v1/pstree: - get: - operationId: getPsTree - summary: Get process tree - description: Get the process tree for all running processes - tags: [metrics, daemon] - security: - - bearerAuth: [] - responses: - '200': - description: Process tree map - content: - application/json: - schema: - type: object - additionalProperties: - $ref: '#/components/schemas/ProcessTreeRoot' - - # Process Endpoints - /api/v1/processes: - get: - operationId: getProcesses - summary: List running processes - description: Get all currently running processes - tags: [process, daemon] - security: - - bearerAuth: [] - responses: - '200': - description: Map of running processes - content: - application/json: - schema: - $ref: '#/components/schemas/ProcessesResponse' - - # Queue Endpoint - /api/v1/queue: - get: - operationId: getQueue - summary: Get command queue - description: Get the current command execution queue - tags: [queue, daemon] - security: - - bearerAuth: [] - responses: - '200': - description: Queue status map - content: - application/json: - schema: - $ref: '#/components/schemas/QueueResponse' - - # WebSocket Endpoints - /api/v1/token: - get: - operationId: createToken - summary: Create WebSocket token - description: Generate a short-lived token for WebSocket authentication - tags: [websocket, daemon] - security: - - bearerAuth: [] + description: Runtime scroll not found + delete: + operationId: deleteScroll + summary: Delete runtime scroll + tags: [runtime, daemon] + parameters: + - name: id + in: path + required: true + schema: + type: string responses: '200': - description: Generated token + description: Runtime scroll 
deleted content: application/json: schema: - $ref: '#/components/schemas/TokenResponse' + $ref: '#/components/schemas/DeletedScroll' + '404': + description: Runtime scroll not found - /api/v1/consoles: - get: - operationId: getConsoles - summary: List all consoles - description: Get all available console connections - tags: [websocket, daemon] - security: - - bearerAuth: [] + /api/v1/scrolls/{id}/commands/{command}: + post: + operationId: runScrollCommand + summary: Run runtime scroll command + tags: [runtime, daemon] + parameters: + - name: id + in: path + required: true + schema: + type: string + - name: command + in: path + required: true + schema: + type: string responses: '200': - description: Map of available consoles + description: Updated runtime scroll content: application/json: schema: - $ref: '#/components/schemas/ConsolesResponse' + $ref: '#/components/schemas/RuntimeScroll' + '404': + description: Runtime scroll not found - # Port Endpoint - /api/v1/ports: + /api/v1/scrolls/{id}/ports: get: - operationId: getPorts - summary: Get port information - description: Get information about all configured ports - tags: [port, daemon] - security: - - bearerAuth: [] - responses: - '200': - description: Array of port information - content: - application/json: - schema: - type: array - items: - $ref: '#/components/schemas/AugmentedPort' - post: - operationId: addPort - summary: Add a port to watch - description: Add a new port to be monitored by the port service (in-memory only) - tags: [port, daemon] - security: - - bearerAuth: [] - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/AddPortRequest' - responses: - '201': - description: Port added successfully - content: - application/json: - schema: - $ref: '#/components/schemas/AugmentedPort' - '400': - description: Invalid port configuration - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - - /api/v1/ports/{port}: - delete: - 
operationId: deletePort - summary: Remove a watched port - description: Stop watching a port (in-memory only) - tags: [port, daemon] - security: - - bearerAuth: [] + operationId: getScrollPorts + summary: Get runtime scroll port status + tags: [runtime, daemon] parameters: - - name: port + - name: id in: path required: true - description: The port number to remove schema: - type: integer - minimum: 1 - maximum: 65535 + type: string responses: - '204': - description: Port removed successfully - '404': - description: Port not found + '200': + description: Runtime scroll port status content: application/json: schema: - $ref: '#/components/schemas/ErrorResponse' + type: array + items: + $ref: '#/components/schemas/RuntimePortStatus' + '404': + description: Runtime scroll not found - # Health Endpoint (authenticated) + # Health Endpoint /api/v1/health: get: operationId: getHealthAuth summary: Get health status description: Get daemon health status tags: [health, daemon] - security: - - bearerAuth: [] responses: '200': description: Healthy @@ -949,108 +332,3 @@ paths: application/json: schema: $ref: '#/components/schemas/HealthResponse' - - # Coldstarter Endpoint - /api/v1/coldstarter/finish: - post: - operationId: finishColdstarter - summary: Finish cold start - description: Signal that cold start process is complete - tags: [coldstarter, daemon] - security: - - bearerAuth: [] - responses: - '202': - description: Accepted - - # Daemon Control - /api/v1/daemon/stop: - post: - operationId: stopDaemon - summary: Stop daemon - description: Gracefully stop the daemon - tags: [daemon, daemon] - security: - - bearerAuth: [] - responses: - '201': - description: Stop initiated - - # Watch/Dev Mode Endpoints - /api/v1/watch/enable: - post: - operationId: enableWatch - summary: Enable development mode - description: Start file watching for development - tags: [watch, daemon] - security: - - bearerAuth: [] - requestBody: - required: false - content: - application/json: - schema: - 
$ref: '#/components/schemas/WatchModeRequest' - responses: - '200': - description: Watch mode enabled - content: - application/json: - schema: - $ref: '#/components/schemas/WatchModeResponse' - '400': - description: Bad request - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - '412': - description: Already active - content: - application/json: - schema: - $ref: '#/components/schemas/WatchModeResponse' - '500': - description: Internal error - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - - /api/v1/watch/disable: - post: - operationId: disableWatch - summary: Disable development mode - description: Stop file watching - tags: [watch, daemon] - security: - - bearerAuth: [] - responses: - '200': - description: Watch mode disabled - content: - application/json: - schema: - $ref: '#/components/schemas/WatchModeResponse' - '500': - description: Internal error - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - - /api/v1/watch/status: - get: - operationId: getWatchStatus - summary: Get watch mode status - description: Check if watch mode is enabled and what paths are being watched - tags: [watch, daemon] - security: - - bearerAuth: [] - responses: - '200': - description: Watch status - content: - application/json: - schema: - $ref: '#/components/schemas/WatchStatusResponse' diff --git a/apps/druid-client/adapters/cli/attach.go b/apps/druid-client/adapters/cli/attach.go new file mode 100644 index 00000000..18838a8c --- /dev/null +++ b/apps/druid-client/adapters/cli/attach.go @@ -0,0 +1,17 @@ +package cli + +import ( + ws "github.com/highcard-dev/daemon/apps/druid-client/adapters/websocket" + "github.com/spf13/cobra" +) + +func (a *App) attachCmd() *cobra.Command { + return &cobra.Command{ + Use: "attach ", + Short: "Attach to a daemon-managed runtime console", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return 
ws.NewAttacher(a.daemonSocket).Attach(cmd.Context(), args[0], args[1]) + }, + } +} diff --git a/apps/druid-client/adapters/cli/create.go b/apps/druid-client/adapters/cli/create.go new file mode 100644 index 00000000..4e0a6456 --- /dev/null +++ b/apps/druid-client/adapters/cli/create.go @@ -0,0 +1,110 @@ +package cli + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/highcard-dev/daemon/apps/druid-client/adapters/daemon" + "github.com/highcard-dev/daemon/internal/core/domain" + coreservices "github.com/highcard-dev/daemon/internal/core/services" + "github.com/highcard-dev/daemon/internal/core/services/registry" + "github.com/highcard-dev/daemon/internal/utils" + "github.com/spf13/cobra" +) + +func (a *App) createCmd() *cobra.Command { + var stateDir string + var scrollRoot string + var dataRoot string + var noData bool + cmd := &cobra.Command{ + Use: "create [name]", + Short: "Create a scroll through the daemon", + Args: cobra.RangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + artifact := args[0] + name := "" + if len(args) == 2 { + name = args[1] + } + if stateDir == "" { + defaultStateDir, err := utils.DefaultRuntimeStateDir() + if err != nil { + return err + } + stateDir = defaultStateDir + } + if (scrollRoot == "") != (dataRoot == "") { + return fmt.Errorf("--scroll-root and --data-root must be provided together") + } + + service, err := a.runtimeService() + if err != nil { + return err + } + + if scrollRoot != "" { + if err := coreservices.MaterializeScrollArtifact(artifact, scrollRoot, dataRoot, registry.NewOciClient(a.loadRegistryStore()), !noData); err != nil { + return err + } + } else { + if !localArtifactExists(artifact) { + scroll, err := service.Create(cmd.Context(), name, artifact, "", "") + if err == nil { + return printJSON(scroll) + } + if !errors.Is(err, daemon.ErrMaterializationUnsupported) { + return err + } + } + store := coreservices.NewRuntimeStateStore(stateDir) + tmpParent := 
filepath.Join(stateDir, "tmp") + if err := os.MkdirAll(tmpParent, 0755); err != nil { + return err + } + tmpDir, err := os.MkdirTemp(tmpParent, "create-scroll-*") + if err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + stagedScrollRoot := filepath.Join(tmpDir, "spec") + stagedDataRoot := filepath.Join(tmpDir, "data") + if err := coreservices.MaterializeScrollArtifact(artifact, stagedScrollRoot, stagedDataRoot, registry.NewOciClient(a.loadRegistryStore()), !noData); err != nil { + return err + } + stagedScroll, err := domain.NewScroll(stagedScrollRoot) + if err != nil { + return err + } + id, err := coreservices.RuntimeScrollID(name, stagedScroll.Name) + if err != nil { + return err + } + scrollRoot = store.ScrollRoot(id) + dataRoot = store.DataRoot(id) + if err := coreservices.MoveMaterializedScroll(stagedScrollRoot, stagedDataRoot, scrollRoot, dataRoot); err != nil { + return err + } + } + + scroll, err := service.Create(cmd.Context(), name, artifact, scrollRoot, dataRoot) + if err != nil { + return err + } + return printJSON(scroll) + }, + } + cmd.Flags().StringVar(&stateDir, "state-dir", "", "Runtime state directory for local materialization (default: ~/.druid/runtime)") + cmd.Flags().StringVar(&scrollRoot, "scroll-root", "", "Daemon-local path containing materialized scroll spec") + cmd.Flags().StringVar(&dataRoot, "data-root", "", "Daemon-local path containing runtime data") + cmd.Flags().BoolVar(&noData, "no-data", false, "Skip scroll data files") + return cmd +} + +func localArtifactExists(artifact string) bool { + _, err := os.Stat(artifact) + return err == nil +} diff --git a/apps/druid-client/adapters/cli/delete.go b/apps/druid-client/adapters/cli/delete.go new file mode 100644 index 00000000..9420694c --- /dev/null +++ b/apps/druid-client/adapters/cli/delete.go @@ -0,0 +1,22 @@ +package cli + +import "github.com/spf13/cobra" + +func (a *App) deleteCmd() *cobra.Command { + return &cobra.Command{ + Use: "delete ", + Short: "Delete a scroll from 
the daemon", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + service, err := a.runtimeService() + if err != nil { + return err + } + deleted, err := service.Delete(cmd.Context(), args[0]) + if err != nil { + return err + } + return printJSON(deleted) + }, + } +} diff --git a/apps/druid-client/adapters/cli/describe.go b/apps/druid-client/adapters/cli/describe.go new file mode 100644 index 00000000..c3440357 --- /dev/null +++ b/apps/druid-client/adapters/cli/describe.go @@ -0,0 +1,22 @@ +package cli + +import "github.com/spf13/cobra" + +func (a *App) describeCmd() *cobra.Command { + return &cobra.Command{ + Use: "describe ", + Short: "Describe a scroll from the daemon", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + service, err := a.runtimeService() + if err != nil { + return err + } + scroll, err := service.Describe(cmd.Context(), args[0]) + if err != nil { + return err + } + return printJSON(scroll) + }, + } +} diff --git a/apps/druid-client/adapters/cli/list.go b/apps/druid-client/adapters/cli/list.go new file mode 100644 index 00000000..f5d133f7 --- /dev/null +++ b/apps/druid-client/adapters/cli/list.go @@ -0,0 +1,22 @@ +package cli + +import "github.com/spf13/cobra" + +func (a *App) listCmd() *cobra.Command { + return &cobra.Command{ + Use: "list", + Short: "List scrolls and status from the daemon", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + service, err := a.runtimeService() + if err != nil { + return err + } + scrolls, err := service.List(cmd.Context()) + if err != nil { + return err + } + return printScrolls(scrolls) + }, + } +} diff --git a/apps/druid-client/adapters/cli/login.go b/apps/druid-client/adapters/cli/login.go new file mode 100644 index 00000000..135b4ba6 --- /dev/null +++ b/apps/druid-client/adapters/cli/login.go @@ -0,0 +1,71 @@ +package cli + +import ( + "fmt" + + "github.com/highcard-dev/daemon/internal/core/domain" + 
"github.com/highcard-dev/daemon/internal/core/services/registry" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +func (a *App) loginCmd() *cobra.Command { + var registryHost string + var registryUser string + var registryPassword string + + cmd := &cobra.Command{ + Use: "login", + Short: "Login to OCI registry", + Long: `Add or update registry credentials in the configuration. +Supports multiple registries with path-based credential matching. + +Examples: + druid-client login --host registry-1.docker.io -u user -p pass + druid-client login --host artifacts.druid.gg/project1 -u user1 -p pass1 + druid-client login --host artifacts.druid.gg/project2 -u user2 -p pass2`, + RunE: func(cmd *cobra.Command, args []string) error { + + if err := registry.ValidateCredentials(registryHost, registryUser, registryPassword); err != nil { + return fmt.Errorf("login failed: %w", err) + } + + cmd.Println("Login succeeded") + + var registries []domain.RegistryCredential + viper.UnmarshalKey("registries", ®istries) + + newCred := domain.RegistryCredential{ + Host: registryHost, + Username: registryUser, + Password: registryPassword, + } + + found := false + for i := range registries { + if registries[i].Host == registryHost { + registries[i] = newCred + found = true + break + } + } + + if !found { + registries = append(registries, newCred) + } + + viper.Set("registries", registries) + + return viper.WriteConfig() + }, + } + + cmd.Flags().StringVar(®istryHost, "host", "", "OCI registry host (e.g., artifacts.druid.gg/project1)") + cmd.Flags().StringVarP(®istryUser, "user", "u", "", "username") + cmd.Flags().StringVarP(®istryPassword, "password", "p", "", "User password") + + cmd.MarkFlagRequired("host") + cmd.MarkFlagRequired("user") + cmd.MarkFlagRequired("password") + return cmd +} diff --git a/apps/druid-client/adapters/cli/output.go b/apps/druid-client/adapters/cli/output.go new file mode 100644 index 00000000..38029f99 --- /dev/null +++ 
b/apps/druid-client/adapters/cli/output.go @@ -0,0 +1,28 @@ +package cli + +import ( + "encoding/json" + "fmt" + "os" + "text/tabwriter" + + "github.com/highcard-dev/daemon/internal/api" +) + +func printScrolls(scrolls []api.RuntimeScroll) error { + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + fmt.Fprintln(w, "NAME\tSTATUS\tSCROLL") + for _, scroll := range scrolls { + fmt.Fprintf(w, "%s\t%s\t%s\n", scroll.Id, scroll.Status, scroll.ScrollName) + } + return w.Flush() +} + +func printJSON(v interface{}) error { + data, err := json.MarshalIndent(v, "", " ") + if err != nil { + return err + } + fmt.Println(string(data)) + return nil +} diff --git a/apps/druid-client/adapters/cli/ports.go b/apps/druid-client/adapters/cli/ports.go new file mode 100644 index 00000000..686cba0e --- /dev/null +++ b/apps/druid-client/adapters/cli/ports.go @@ -0,0 +1,22 @@ +package cli + +import "github.com/spf13/cobra" + +func (a *App) portsCmd() *cobra.Command { + return &cobra.Command{ + Use: "ports ", + Short: "Show runtime port status for a scroll", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + service, err := a.runtimeService() + if err != nil { + return err + } + ports, err := service.Ports(cmd.Context(), args[0]) + if err != nil { + return err + } + return printJSON(ports) + }, + } +} diff --git a/apps/druid-client/adapters/cli/pull.go b/apps/druid-client/adapters/cli/pull.go new file mode 100644 index 00000000..ee992bbe --- /dev/null +++ b/apps/druid-client/adapters/cli/pull.go @@ -0,0 +1,36 @@ +package cli + +import ( + "github.com/highcard-dev/daemon/internal/core/services/registry" + "github.com/highcard-dev/daemon/internal/utils/logger" + "github.com/spf13/cobra" +) + +func (a *App) pullCmd() *cobra.Command { + var noData bool + cmd := &cobra.Command{ + Use: "pull [dir]", + Short: "Pull a scroll from an OCI registry (tag or digest)", + Args: cobra.RangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + artifact 
:= args[0] + dir := currentWorkingDir() + if len(args) == 2 { + dir = args[1] + } + + registryClient := registry.NewOciClient(a.loadRegistryStore()) + + err := registryClient.PullSelective(dir, artifact, !noData, nil) + if err != nil { + logger.Log().Error("Failed to pull from registry") + return err + } + + logger.Log().Info("Pulled from registry") + return nil + }, + } + cmd.Flags().BoolVar(&noData, "no-data", false, "Skip scroll data files") + return cmd +} diff --git a/apps/druid-client/adapters/cli/push.go b/apps/druid-client/adapters/cli/push.go new file mode 100644 index 00000000..fe63a677 --- /dev/null +++ b/apps/druid-client/adapters/cli/push.go @@ -0,0 +1,132 @@ +package cli + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/services/registry" + "github.com/highcard-dev/daemon/internal/utils" + "github.com/highcard-dev/daemon/internal/utils/logger" + "github.com/spf13/cobra" + "go.uber.org/zap" +) + +func (a *App) pushCmd() *cobra.Command { + var minRam string + var minCpu string + var minDisk string + var image string + var scrollPorts []string + var packMeta bool + var smart bool + var category string + + cmd := &cobra.Command{ + Use: "push [artifact] [dir]", + Short: "Generate OCI Artifacts and push to a remote registry", + Args: cobra.MaximumNArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + credStore := a.loadRegistryStore() + if !credStore.HasCredentials() { + return fmt.Errorf("no registry credentials configured. 
Please use `druid-client login` to set them") + } + + fullPath := currentWorkingDir() + artifact := "" + switch len(args) { + case 1: + if isScrollDir(args[0]) { + fullPath = args[0] + } else { + artifact = args[0] + } + case 2: + artifact = args[0] + fullPath = args[1] + } + + scroll, err := domain.NewScroll(fullPath) + + if err != nil { + return err + } + + repo := scroll.Name + tag := scroll.AppVersion + + if artifact != "" { + repo, tag = utils.SplitArtifact(artifact) + } + + logger.Log().Info("Pushing "+repo+":"+tag+" to registry", zap.String("path", fullPath)) + + ociClient := registry.NewOciClient(credStore) + + overrides := map[string]string{} + if minRam != "" { + overrides["gg.druid.scroll.minRam"] = minRam + } + if minCpu != "" { + overrides["gg.druid.scroll.minCpu"] = minCpu + } + if minDisk != "" { + overrides["gg.druid.scroll.minDisk"] = minDisk + } + if image != "" { + overrides["gg.druid.scroll.image"] = image + } + if smart { + overrides["gg.druid.scroll.smart"] = "true" + } + if category != "" { + overrides["gg.druid.scroll.category"] = category + } + for _, p := range scrollPorts { + parts := strings.Split(p, "=") + name := parts[0] + port := "0" + if len(parts) == 2 { + port = parts[1] + } + overrides[fmt.Sprintf("gg.druid.scroll.port.%s", name)] = port + } + + _, err = ociClient.Push(fullPath, repo, tag, overrides, packMeta, &scroll.File) + if err != nil { + return err + } + + logger.Log().Info("Pushed "+scroll.Name+" to registry", zap.String("path", fullPath)) + return nil + }, + } + + cmd.AddCommand(a.pushCategoryCmd()) + + cmd.Flags().StringVarP(&minRam, "min-ram", "r", minRam, "Minimum RAM required to run the application. (Will be added as a manifest annotation gg.druid.scroll.minRam)") + cmd.Flags().StringVarP(&minCpu, "min-cpu", "c", minCpu, "Minimum CPU required to run the application. 
(Will be added as a manifest annotation gg.druid.scroll.minCpu)") + cmd.Flags().StringVarP(&minDisk, "min-disk", "d", minDisk, "Minimum Disk required to run the application. (Will be added as a manifest annotation gg.druid.scroll.minDisk)") + cmd.Flags().BoolVarP(&smart, "smart", "s", false, "Indicates, if the scroll is able to run as a smart deployment (Will be added as a manifest annotation gg.druid.scroll.smart)") + cmd.Flags().StringVar(&category, "category", category, "Category of the scroll. (Will be added as a manifest annotation gg.druid.scroll.category)") + + cmd.Flags().StringVarP(&image, "image", "i", image, "Image to use for the scroll. (Will be added as a manifest annotation gg.druid.scroll.image)") + + cmd.Flags().StringSliceVarP(&scrollPorts, "port", "p", scrollPorts, "Ports to expose. Format webserver=80, dns=53/udp or just ftp (Will be added as a manifest annotation gg.druid.scroll.ports.)") + + cmd.Flags().BoolVarP(&packMeta, "pack-meta", "m", packMeta, "Pack the meta folder into the scroll.") + return cmd +} + +func isScrollDir(dir string) bool { + if dir == "" { + return false + } + path := filepath.Join(dir, "scroll.yaml") + if _, err := os.Stat(path); err == nil { + return true + } + return false +} diff --git a/apps/druid-client/adapters/cli/push_category.go b/apps/druid-client/adapters/cli/push_category.go new file mode 100644 index 00000000..9b807853 --- /dev/null +++ b/apps/druid-client/adapters/cli/push_category.go @@ -0,0 +1,48 @@ +package cli + +import ( + "fmt" + + "github.com/highcard-dev/daemon/internal/core/services/registry" + "github.com/highcard-dev/daemon/internal/utils/logger" + "github.com/spf13/cobra" + "go.uber.org/zap" +) + +func (a *App) pushCategoryCmd() *cobra.Command { + var pushCategoryNamePattern string + + cmd := &cobra.Command{ + Use: "category", + Short: "Push locale markdown files (e.g. 
de-DE.md) from a scroll directory as separate OCI layers.", + Args: cobra.RangeArgs(2, 3), + RunE: func(cmd *cobra.Command, args []string) error { + credStore := a.loadRegistryStore() + if !credStore.HasCredentials() { + return fmt.Errorf("no registry credentials configured. Please use `druid-client login` to set them") + } + + repo := args[0] + category := args[1] + scrollDir := currentWorkingDir() + if len(args) == 3 { + scrollDir = args[2] + } + + logger.Log().Info("Pushing "+repo+" category to registry", zap.String("scrollDir", scrollDir)) + + ociClient := registry.NewOciClient(credStore) + + _, err := ociClient.PushCategory(scrollDir, repo, category) + + if err != nil { + return err + } + + logger.Log().Info("Pushed " + repo + " category to registry") + return nil + }, + } + cmd.Flags().StringVar(&pushCategoryNamePattern, "match", "", "Regexp matching file basenames to push (default: locale markdown like de-DE.md)") + return cmd +} diff --git a/apps/druid-client/adapters/cli/register.go b/apps/druid-client/adapters/cli/register.go new file mode 100644 index 00000000..d0bcf5d1 --- /dev/null +++ b/apps/druid-client/adapters/cli/register.go @@ -0,0 +1,48 @@ +package cli + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/spf13/cobra" +) + +func (a *App) registerCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "register [dir] [name]", + Short: "Register an already checked-out scroll with the daemon", + Args: cobra.MaximumNArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + dir := currentWorkingDir() + name := "" + if len(args) >= 1 { + dir = args[0] + } + if len(args) == 2 { + name = args[1] + } + scrollRoot, err := filepath.Abs(dir) + if err != nil { + return err + } + info, err := os.Stat(filepath.Join(scrollRoot, "scroll.yaml")) + if err != nil { + return fmt.Errorf("registered scroll directory must contain scroll.yaml: %w", err) + } + if info.IsDir() { + return fmt.Errorf("registered scroll directory must contain scroll.yaml 
file") + } + service, err := a.runtimeService() + if err != nil { + return err + } + scroll, err := service.Create(cmd.Context(), name, scrollRoot, scrollRoot, scrollRoot) + if err != nil { + return err + } + return printJSON(scroll) + }, + } + return cmd +} diff --git a/apps/druid-client/adapters/cli/root.go b/apps/druid-client/adapters/cli/root.go new file mode 100644 index 00000000..165e46cc --- /dev/null +++ b/apps/druid-client/adapters/cli/root.go @@ -0,0 +1,91 @@ +package cli + +import ( + "os" + + "github.com/highcard-dev/daemon/apps/druid-client/adapters/daemon" + "github.com/highcard-dev/daemon/apps/druid-client/core/services" + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/services/registry" + "github.com/highcard-dev/daemon/internal/utils" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +type App struct { + daemonSocket string + configFile string + envPath string +} + +func NewRootCommand() *cobra.Command { + app := &App{} + cmd := &cobra.Command{ + Use: "druid-client", + Short: "Druid runtime daemon client", + Run: func(cmd *cobra.Command, args []string) { + cmd.Usage() + }, + } + cobra.OnInitialize(app.initConfig) + cmd.PersistentFlags().StringVar(&app.daemonSocket, "daemon-socket", utils.DefaultRuntimeSocketPath(), "Runtime daemon Unix socket path") + cmd.PersistentFlags().StringVar(&app.configFile, "config", "", "Path to config file (default: ~/.druid.yaml)") + cmd.PersistentFlags().StringVarP(&app.envPath, "env-file", "e", "./.env", "Path to environment file (.env)") + cmd.AddCommand(app.createCmd()) + cmd.AddCommand(app.registerCmd()) + cmd.AddCommand(app.listCmd()) + cmd.AddCommand(app.describeCmd()) + cmd.AddCommand(app.deleteCmd()) + cmd.AddCommand(app.runCmd()) + cmd.AddCommand(app.portsCmd()) + cmd.AddCommand(app.attachCmd()) + cmd.AddCommand(app.pullCmd()) + cmd.AddCommand(app.pushCmd()) + cmd.AddCommand(app.loginCmd()) + return cmd +} + +func (a *App) initConfig() { + 
viper.AutomaticEnv() + if a.configFile != "" { + viper.SetConfigFile(a.configFile) + } else { + home, err := os.UserHomeDir() + cobra.CheckErr(err) + viper.SetConfigType("yaml") + viper.SetConfigName(".druid") + viper.AddConfigPath(home) + } + viper.SafeWriteConfig() + viper.ReadInConfig() +} + +func (a *App) loadRegistryStore() *registry.CredentialStore { + var registries []domain.RegistryCredential + viper.UnmarshalKey("registries", ®istries) + if len(registries) == 0 { + host := viper.GetString("registry.host") + user := viper.GetString("registry.user") + password := viper.GetString("registry.password") + if host != "" { + registries = append(registries, domain.RegistryCredential{Host: host, Username: user, Password: password}) + } + } + return registry.NewCredentialStore(registries) +} + +func (a *App) runtimeService() (*services.RuntimeService, error) { + client, err := daemon.NewOpenAPIClient(a.daemonSocket) + if err != nil { + return nil, err + } + return services.NewRuntimeService(client), nil +} + +func currentWorkingDir() string { + cwd, err := os.Getwd() + if err != nil { + return "." 
+ } + return cwd +} diff --git a/apps/druid-client/adapters/cli/root_test.go b/apps/druid-client/adapters/cli/root_test.go new file mode 100644 index 00000000..a43ad782 --- /dev/null +++ b/apps/druid-client/adapters/cli/root_test.go @@ -0,0 +1,52 @@ +package cli + +import "testing" + +func TestRootCommandExposesOCICommands(t *testing.T) { + root := NewRootCommand() + for _, name := range []string{"pull", "push", "login", "register"} { + cmd, _, err := root.Find([]string{name}) + if err != nil || cmd == nil || cmd.Name() != name { + t.Fatalf("druid-client should expose %q", name) + } + } + cmd, _, err := root.Find([]string{"push", "category"}) + if err != nil || cmd == nil || cmd.Name() != "category" { + t.Fatalf("druid-client should expose push category") + } +} + +func TestRegisterRejectsDirectoryWithoutScrollYAML(t *testing.T) { + cmd := (&App{}).registerCmd() + err := cmd.RunE(cmd, []string{t.TempDir()}) + if err == nil { + t.Fatal("register should reject directory without scroll.yaml") + } +} + +func TestRootCommandIsSocketOnly(t *testing.T) { + root := NewRootCommand() + if flag := root.PersistentFlags().Lookup("daemon-url"); flag != nil { + t.Fatal("druid-client should not expose --daemon-url") + } + if flag := root.PersistentFlags().Lookup("daemon-socket"); flag == nil { + t.Fatal("druid-client should expose --daemon-socket") + } +} + +func TestRootCommandDoesNotExposeCWDFlag(t *testing.T) { + root := NewRootCommand() + if flag := root.PersistentFlags().Lookup("cwd"); flag != nil { + t.Fatal("druid-client should not expose --cwd") + } +} + +func TestCreateAndRegisterDoNotExposeRuntimeFlag(t *testing.T) { + app := &App{} + if flag := app.createCmd().Flags().Lookup("runtime"); flag != nil { + t.Fatal("druid-client create should not expose --runtime") + } + if flag := app.registerCmd().Flags().Lookup("runtime"); flag != nil { + t.Fatal("druid-client register should not expose --runtime") + } +} diff --git a/apps/druid-client/adapters/cli/run.go 
b/apps/druid-client/adapters/cli/run.go new file mode 100644 index 00000000..87a158be --- /dev/null +++ b/apps/druid-client/adapters/cli/run.go @@ -0,0 +1,22 @@ +package cli + +import "github.com/spf13/cobra" + +func (a *App) runCmd() *cobra.Command { + return &cobra.Command{ + Use: "run <scroll> <command>", + Short: "Run a command on a daemon-managed scroll", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + service, err := a.runtimeService() + if err != nil { + return err + } + scroll, err := service.Run(cmd.Context(), args[0], args[1]) + if err != nil { + return err + } + return printJSON(scroll) + }, + } +} diff --git a/apps/druid-client/adapters/daemon/openapi_client.go b/apps/druid-client/adapters/daemon/openapi_client.go new file mode 100644 index 00000000..116bea84 --- /dev/null +++ b/apps/druid-client/adapters/daemon/openapi_client.go @@ -0,0 +1,134 @@ +package daemon + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "strings" + + "github.com/highcard-dev/daemon/internal/api" + "github.com/highcard-dev/daemon/internal/utils" +) + +var ErrMaterializationUnsupported = errors.New("daemon materialization unsupported") + +type OpenAPIClient struct { + client *api.ClientWithResponses +} + +func NewOpenAPIClient(daemonSocket string) (*OpenAPIClient, error) { + if daemonSocket == "" { + daemonSocket = utils.DefaultRuntimeSocketPath() + } + transport := &http.Transport{ + DialContext: func(ctx context.Context, network string, addr string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, "unix", daemonSocket) + }, + } + client, err := api.NewClientWithResponses("http://druid", api.WithHTTPClient(&http.Client{Transport: transport})) + if err != nil { + return nil, err + } + return &OpenAPIClient{client: client}, nil +} + +func (c *OpenAPIClient) CreateScroll(ctx context.Context, name string, artifact string, scrollRoot string, dataRoot string) (*api.RuntimeScroll, error) { + var requestName *string + if name != "" { +
requestName = &name + } + var requestScrollRoot *string + if scrollRoot != "" { + requestScrollRoot = &scrollRoot + } + var requestDataRoot *string + if dataRoot != "" { + requestDataRoot = &dataRoot + } + res, err := c.client.CreateScrollWithResponse(ctx, api.CreateScrollJSONRequestBody{ + Artifact: artifact, + Name: requestName, + ScrollRoot: requestScrollRoot, + DataRoot: requestDataRoot, + }) + if err != nil { + return nil, err + } + if res.StatusCode() == http.StatusNotImplemented { + return nil, ErrMaterializationUnsupported + } + if err := ensureStatus(res.StatusCode(), res.Body); err != nil { + return nil, err + } + return res.JSON201, nil +} + +func (c *OpenAPIClient) ListScrolls(ctx context.Context) ([]api.RuntimeScroll, error) { + res, err := c.client.ListScrollsWithResponse(ctx) + if err != nil { + return nil, err + } + if err := ensureStatus(res.StatusCode(), res.Body); err != nil { + return nil, err + } + if res.JSON200 == nil { + return nil, nil + } + return *res.JSON200, nil +} + +func (c *OpenAPIClient) GetScroll(ctx context.Context, id string) (*api.RuntimeScroll, error) { + res, err := c.client.GetScrollWithResponse(ctx, id) + if err != nil { + return nil, err + } + if err := ensureStatus(res.StatusCode(), res.Body); err != nil { + return nil, err + } + return res.JSON200, nil +} + +func (c *OpenAPIClient) DeleteScroll(ctx context.Context, id string) (*api.DeletedScroll, error) { + res, err := c.client.DeleteScrollWithResponse(ctx, id) + if err != nil { + return nil, err + } + if err := ensureStatus(res.StatusCode(), res.Body); err != nil { + return nil, err + } + return res.JSON200, nil +} + +func (c *OpenAPIClient) RunScrollCommand(ctx context.Context, id string, command string) (*api.RuntimeScroll, error) { + res, err := c.client.RunScrollCommandWithResponse(ctx, id, command) + if err != nil { + return nil, err + } + if err := ensureStatus(res.StatusCode(), res.Body); err != nil { + return nil, err + } + return res.JSON200, nil +} + +func (c 
*OpenAPIClient) GetScrollPorts(ctx context.Context, id string) ([]api.RuntimePortStatus, error) { + res, err := c.client.GetScrollPortsWithResponse(ctx, id) + if err != nil { + return nil, err + } + if err := ensureStatus(res.StatusCode(), res.Body); err != nil { + return nil, err + } + if res.JSON200 == nil { + return nil, nil + } + return *res.JSON200, nil +} + +func ensureStatus(statusCode int, body []byte) error { + if statusCode < 400 { + return nil + } + return fmt.Errorf("daemon returned %d: %s", statusCode, strings.TrimSpace(string(body))) +} diff --git a/apps/druid-client/adapters/websocket/attacher.go b/apps/druid-client/adapters/websocket/attacher.go new file mode 100644 index 00000000..15e5fd40 --- /dev/null +++ b/apps/druid-client/adapters/websocket/attacher.go @@ -0,0 +1,97 @@ +package websocket + +import ( + "context" + "fmt" + "io" + "net" + "net/url" + "os" + "os/signal" + "syscall" + + gw "github.com/gorilla/websocket" + "github.com/highcard-dev/daemon/internal/utils" +) + +type Attacher struct { + daemonSocket string +} + +func NewAttacher(daemonSocket string) *Attacher { + return &Attacher{daemonSocket: daemonSocket} +} + +func (a *Attacher) Attach(ctx context.Context, scroll string, console string) error { + wsURL, err := a.websocketURL(scroll, console) + if err != nil { + return err + } + daemonSocket := a.daemonSocket + if daemonSocket == "" { + daemonSocket = utils.DefaultRuntimeSocketPath() + } + dialer := &gw.Dialer{ + NetDialContext: func(ctx context.Context, network string, addr string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, "unix", daemonSocket) + }, + } + conn, _, err := dialer.Dial(wsURL, nil) + if err != nil { + return err + } + defer conn.Close() + + ctx, stop := signal.NotifyContext(ctx, os.Interrupt, syscall.SIGTERM) + defer stop() + + done := make(chan error, 2) + go readOutput(conn, done) + go writeInput(conn, done) + + select { + case <-ctx.Done(): + return nil + case err := <-done: + return err + } +} + 
+func (a *Attacher) websocketURL(scroll string, console string) (string, error) { + return fmt.Sprintf("ws://druid/ws/v1/scrolls/%s/consoles/%s", url.PathEscape(scroll), url.PathEscape(console)), nil +} + +func readOutput(conn *gw.Conn, done chan<- error) { + for { + _, data, err := conn.ReadMessage() + if err != nil { + done <- err + return + } + if _, err := os.Stdout.Write(data); err != nil { + done <- err + return + } + } +} + +func writeInput(conn *gw.Conn, done chan<- error) { + buf := make([]byte, 1024) + for { + n, err := os.Stdin.Read(buf) + if n > 0 { + if writeErr := conn.WriteMessage(gw.TextMessage, buf[:n]); writeErr != nil { + done <- writeErr + return + } + } + if err != nil { + if err == io.EOF { + done <- nil + } else { + done <- err + } + return + } + } +} diff --git a/apps/druid-client/core/ports/runtime_daemon.go b/apps/druid-client/core/ports/runtime_daemon.go new file mode 100644 index 00000000..cb174cfd --- /dev/null +++ b/apps/druid-client/core/ports/runtime_daemon.go @@ -0,0 +1,20 @@ +package ports + +import ( + "context" + + "github.com/highcard-dev/daemon/internal/api" +) + +type RuntimeDaemon interface { + CreateScroll(ctx context.Context, name string, artifact string, scrollRoot string, dataRoot string) (*api.RuntimeScroll, error) + ListScrolls(ctx context.Context) ([]api.RuntimeScroll, error) + GetScroll(ctx context.Context, id string) (*api.RuntimeScroll, error) + DeleteScroll(ctx context.Context, id string) (*api.DeletedScroll, error) + RunScrollCommand(ctx context.Context, id string, command string) (*api.RuntimeScroll, error) + GetScrollPorts(ctx context.Context, id string) ([]api.RuntimePortStatus, error) +} + +type ConsoleAttacher interface { + Attach(ctx context.Context, scroll string, console string) error +} diff --git a/apps/druid-client/core/services/runtime_service.go b/apps/druid-client/core/services/runtime_service.go new file mode 100644 index 00000000..2ab00b9c --- /dev/null +++ 
b/apps/druid-client/core/services/runtime_service.go @@ -0,0 +1,40 @@ +package services + +import ( + "context" + + "github.com/highcard-dev/daemon/apps/druid-client/core/ports" + "github.com/highcard-dev/daemon/internal/api" +) + +type RuntimeService struct { + daemon ports.RuntimeDaemon +} + +func NewRuntimeService(daemon ports.RuntimeDaemon) *RuntimeService { + return &RuntimeService{daemon: daemon} +} + +func (s *RuntimeService) Create(ctx context.Context, name string, artifact string, scrollRoot string, dataRoot string) (*api.RuntimeScroll, error) { + return s.daemon.CreateScroll(ctx, name, artifact, scrollRoot, dataRoot) +} + +func (s *RuntimeService) List(ctx context.Context) ([]api.RuntimeScroll, error) { + return s.daemon.ListScrolls(ctx) +} + +func (s *RuntimeService) Describe(ctx context.Context, id string) (*api.RuntimeScroll, error) { + return s.daemon.GetScroll(ctx, id) +} + +func (s *RuntimeService) Delete(ctx context.Context, id string) (*api.DeletedScroll, error) { + return s.daemon.DeleteScroll(ctx, id) +} + +func (s *RuntimeService) Run(ctx context.Context, id string, command string) (*api.RuntimeScroll, error) { + return s.daemon.RunScrollCommand(ctx, id, command) +} + +func (s *RuntimeService) Ports(ctx context.Context, id string) ([]api.RuntimePortStatus, error) { + return s.daemon.GetScrollPorts(ctx, id) +} diff --git a/apps/druid-client/main.go b/apps/druid-client/main.go new file mode 100644 index 00000000..8cb59f72 --- /dev/null +++ b/apps/druid-client/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "os" + + "github.com/highcard-dev/daemon/apps/druid-client/adapters/cli" + "github.com/highcard-dev/daemon/internal/utils/logger" +) + +func main() { + logger.Log(logger.WithStructuredLogging()) + if err := cli.NewRootCommand().Execute(); err != nil { + os.Exit(1) + } +} diff --git a/apps/druid-coldstarter/adapters/cli/root.go b/apps/druid-coldstarter/adapters/cli/root.go new file mode 100644 index 00000000..3f6919cf --- /dev/null +++ 
b/apps/druid-coldstarter/adapters/cli/root.go @@ -0,0 +1,39 @@ +package cli + +import ( + "context" + "fmt" + "os" + "os/signal" + "syscall" + + "github.com/highcard-dev/daemon/apps/druid-coldstarter/adapters/filesystem" + "github.com/highcard-dev/daemon/apps/druid-coldstarter/core/services" + "github.com/spf13/cobra" +) + +func NewRootCommand() *cobra.Command { + var scrollRoot string + var statusFile string + var runtimeConfig string + + cmd := &cobra.Command{ + Use: "druid-coldstarter", + Short: "Run the standalone Druid coldstart gate", + RunE: func(cmd *cobra.Command, args []string) error { + if scrollRoot == "" && runtimeConfig == "" { + return fmt.Errorf("--scroll-root or --runtime-config is required") + } + ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM, syscall.SIGINT) + defer stop() + if runtimeConfig != "" { + return services.NewColdstarterService(filesystem.NewStatusWriter()).RunWithRuntimeConfig(ctx, runtimeConfig, statusFile) + } + return services.NewColdstarterService(filesystem.NewStatusWriter()).Run(ctx, scrollRoot, statusFile) + }, + } + cmd.Flags().StringVar(&scrollRoot, "scroll-root", "", "Mounted scroll root containing scroll.yaml") + cmd.Flags().StringVar(&runtimeConfig, "runtime-config", "", "Generated runtime config path") + cmd.Flags().StringVar(&statusFile, "status-file", "", "Optional status file path, relative to scroll root unless absolute") + return cmd +} diff --git a/apps/druid-coldstarter/adapters/filesystem/status_writer.go b/apps/druid-coldstarter/adapters/filesystem/status_writer.go new file mode 100644 index 00000000..905bfa20 --- /dev/null +++ b/apps/druid-coldstarter/adapters/filesystem/status_writer.go @@ -0,0 +1,46 @@ +package filesystem + +import ( + "encoding/json" + "os" + "path/filepath" + "time" + + "github.com/highcard-dev/daemon/internal/core/domain" +) + +type StatusWriter struct{} + +type status struct { + FinishedAt time.Time `json:"finished_at"` + PortName string 
`json:"port_name,omitempty"` + Port int `json:"port,omitempty"` + Protocol string `json:"protocol,omitempty"` +} + +func NewStatusWriter() *StatusWriter { + return &StatusWriter{} +} + +func (w *StatusWriter) Write(scrollRoot string, statusFile string, port *domain.AugmentedPort) error { + path := statusFile + if !filepath.IsAbs(path) { + path = filepath.Join(scrollRoot, statusFile) + } + + data := status{FinishedAt: time.Now().UTC()} + if port != nil { + data.PortName = port.Name + data.Port = port.Port.Port + data.Protocol = port.Protocol + } + + encoded, err := json.MarshalIndent(data, "", " ") + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + return os.WriteFile(path, append(encoded, '\n'), 0644) +} diff --git a/apps/druid-coldstarter/core/ports/status_writer.go b/apps/druid-coldstarter/core/ports/status_writer.go new file mode 100644 index 00000000..8cd47d10 --- /dev/null +++ b/apps/druid-coldstarter/core/ports/status_writer.go @@ -0,0 +1,7 @@ +package ports + +import "github.com/highcard-dev/daemon/internal/core/domain" + +type StatusWriter interface { + Write(scrollRoot string, statusFile string, port *domain.AugmentedPort) error +} diff --git a/apps/druid-coldstarter/core/services/coldstarter.go b/apps/druid-coldstarter/core/services/coldstarter.go new file mode 100644 index 00000000..c248df3a --- /dev/null +++ b/apps/druid-coldstarter/core/services/coldstarter.go @@ -0,0 +1,94 @@ +package services + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + + "github.com/highcard-dev/daemon/apps/druid-coldstarter/core/ports" + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/services" + "github.com/highcard-dev/daemon/internal/utils/logger" + "go.uber.org/zap" +) + +type ColdstarterService struct { + statusWriter ports.StatusWriter +} + +func NewColdstarterService(statusWriter ports.StatusWriter) *ColdstarterService { + 
return &ColdstarterService{statusWriter: statusWriter} +} + +func (s *ColdstarterService) RunWithRuntimeConfig(ctx context.Context, runtimeConfigPath string, statusFile string) error { + data, err := os.ReadFile(runtimeConfigPath) + if err != nil { + return fmt.Errorf("failed to read runtime config: %w", err) + } + var config domain.RuntimeConfig + if err := json.Unmarshal(data, &config); err != nil { + return fmt.Errorf("failed to parse runtime config: %w", err) + } + if len(config.Ports) == 0 { + return fmt.Errorf("no ports found in runtime config") + } + + logger.Log().Info("Coldstart runtime config loaded", zap.String("scroll", config.Scroll.ID), zap.Any("ports", config.Ports)) + runtimeRoot := filepath.Dir(runtimeConfigPath) + if filepath.Base(runtimeRoot) == domain.RuntimeConfigDir { + runtimeRoot = filepath.Dir(runtimeRoot) + } + portService := services.NewPortServiceWithScrollFile(&domain.File{Ports: config.Ports}) + coldStarter := services.NewColdStarter(portService, nil, runtimeRoot) + + finish := coldStarter.Start(ctx) + select { + case <-ctx.Done(): + coldStarter.Stop() + return ctx.Err() + case port := <-finish: + coldStarter.Stop() + if statusFile != "" && s.statusWriter != nil { + if err := s.statusWriter.Write(runtimeRoot, statusFile, port); err != nil { + return err + } + } + logger.Log().Info("Coldstarter finished") + return nil + } +} + +func (s *ColdstarterService) Run(ctx context.Context, scrollRoot string, statusFile string) error { + scrollService, err := services.NewScrollService(scrollRoot) + if err != nil { + return fmt.Errorf("failed to load scroll: %w", err) + } + + currentScroll := scrollService.GetCurrent() + if len(currentScroll.Ports) == 0 { + return fmt.Errorf("no ports found in scroll") + } + + logger.Log().Info("Coldstart scroll loaded", zap.String("name", currentScroll.Name), zap.Any("version", currentScroll.Version), zap.Any("ports", currentScroll.Ports)) + + portService := services.NewPortServiceWithScrollFile(¤tScroll.File) + 
coldStarter := services.NewColdStarter(portService, nil, scrollService.GetDir()) + + finish := coldStarter.Start(ctx) + select { + case <-ctx.Done(): + coldStarter.Stop() + return ctx.Err() + case port := <-finish: + coldStarter.Stop() + if statusFile != "" && s.statusWriter != nil { + if err := s.statusWriter.Write(scrollRoot, statusFile, port); err != nil { + return err + } + } + logger.Log().Info("Coldstarter finished") + return nil + } +} diff --git a/apps/druid-coldstarter/main.go b/apps/druid-coldstarter/main.go new file mode 100644 index 00000000..ca1f2069 --- /dev/null +++ b/apps/druid-coldstarter/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "fmt" + "os" + + "github.com/highcard-dev/daemon/apps/druid-coldstarter/adapters/cli" +) + +func main() { + if err := cli.NewRootCommand().Execute(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} diff --git a/cmd/semver.go b/apps/druid/adapters/cli/app_version.go similarity index 96% rename from cmd/semver.go rename to apps/druid/adapters/cli/app_version.go index b51a2ff4..f225c992 100644 --- a/cmd/semver.go +++ b/apps/druid/adapters/cli/app_version.go @@ -1,4 +1,4 @@ -package cmd +package cli import ( "fmt" @@ -10,7 +10,7 @@ import ( "github.com/spf13/cobra" ) -var SemverCmd = &cobra.Command{ +var AppVersionCmd = &cobra.Command{ Use: "app_version [semver1 string] [lt|gt|eq|ne|le|ge] [semver2 string]", Short: "Show or compare active app version", Long: "This command shows the active app version. If a comparison operator and semver string are provided, the command will compare the active app version to the semver string. If the comparison is true, the command will exit with a 0 exit code. 
If the comparison is false, the command will exit with a 1 exit code.", @@ -18,7 +18,7 @@ var SemverCmd = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { logger.Log() - scrollService, err := services.NewScrollService(cwd) + scrollService, err := services.NewScrollService(currentWorkingDir()) if err != nil { return fmt.Errorf("error creating scroll service: %w", err) diff --git a/apps/druid/adapters/cli/output.go b/apps/druid/adapters/cli/output.go new file mode 100644 index 00000000..3bdd634c --- /dev/null +++ b/apps/druid/adapters/cli/output.go @@ -0,0 +1,15 @@ +package cli + +import ( + "encoding/json" + "fmt" +) + +func printJSON(v interface{}) error { + data, err := json.MarshalIndent(v, "", " ") + if err != nil { + return err + } + fmt.Println(string(data)) + return nil +} diff --git a/apps/druid/adapters/cli/root.go b/apps/druid/adapters/cli/root.go new file mode 100644 index 00000000..21f0f722 --- /dev/null +++ b/apps/druid/adapters/cli/root.go @@ -0,0 +1,69 @@ +package cli + +import ( + "os" + + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var envPath string +var configFile string +var runtimeStateDir string +var runtimeBackend string + +var RootCmd = &cobra.Command{ + Use: "druid", + Short: "Druid Daemon that enable Scroll usage and communication", + Long: `A Scalable and Customizable daemon + to run any of your applications with the usage of Scrolls.`, + Run: func(cmd *cobra.Command, args []string) { + cmd.Usage() + }, +} + +func init() { + cobra.OnInitialize(initConfig) + + RootCmd.AddCommand(ServeCommand) + RootCmd.AddCommand(UpdateCommand) + RootCmd.AddCommand(AppVersionCmd) + RootCmd.AddCommand(VersionCmd) + RootCmd.AddCommand(ValidateCmd) + + RootCmd.PersistentFlags().StringVarP(&envPath, "env-file", "e", "./.env", "Path to environment file (.env)") + RootCmd.PersistentFlags().StringVar(&configFile, "config", "", "Path to config file (default: ~/.druid.yaml)") + +} + +func initConfig() { + viper.AutomaticEnv() + + 
if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := os.UserHomeDir() + cobra.CheckErr(err) + + viper.SetConfigType("yaml") + viper.SetConfigName(".druid") + viper.AddConfigPath(home) + } + + viper.SafeWriteConfig() + viper.ReadInConfig() +} + +func Execute() { + if err := RootCmd.Execute(); err != nil { + os.Exit(1) + } +} + +func currentWorkingDir() string { + cwd, err := os.Getwd() + if err != nil { + return "." + } + return cwd +} diff --git a/apps/druid/adapters/cli/root_test.go b/apps/druid/adapters/cli/root_test.go new file mode 100644 index 00000000..c30bc66e --- /dev/null +++ b/apps/druid/adapters/cli/root_test.go @@ -0,0 +1,28 @@ +package cli + +import "testing" + +func TestRootCommandDoesNotExposeOCICommands(t *testing.T) { + for _, name := range []string{"pull", "push", "login"} { + if cmd, _, err := RootCmd.Find([]string{name}); err == nil && cmd != nil && cmd.Name() == name { + t.Fatalf("druid should not expose %q", name) + } + } +} + +func TestServeCommandIsSocketOnly(t *testing.T) { + for _, name := range []string{"tcp", "port"} { + if flag := ServeCommand.Flags().Lookup(name); flag != nil { + t.Fatalf("druid serve should not expose --%s", name) + } + } + if flag := ServeCommand.Flags().Lookup("socket"); flag == nil { + t.Fatal("druid serve should expose --socket") + } +} + +func TestRootCommandDoesNotExposeCWDFlag(t *testing.T) { + if flag := RootCmd.PersistentFlags().Lookup("cwd"); flag != nil { + t.Fatal("druid should not expose --cwd") + } +} diff --git a/apps/druid/adapters/cli/serve.go b/apps/druid/adapters/cli/serve.go new file mode 100644 index 00000000..04069f73 --- /dev/null +++ b/apps/druid/adapters/cli/serve.go @@ -0,0 +1,97 @@ +package cli + +import ( + "net" + "os" + "path/filepath" + + "github.com/gofiber/fiber/v2" + runtimehandlers "github.com/highcard-dev/daemon/apps/druid/adapters/http/handlers" + appservices "github.com/highcard-dev/daemon/apps/druid/core/services" + 
"github.com/highcard-dev/daemon/internal/core/services" + runtimebackend "github.com/highcard-dev/daemon/internal/runtime" + runtimekubernetes "github.com/highcard-dev/daemon/internal/runtime/kubernetes" + "github.com/highcard-dev/daemon/internal/utils" + "github.com/highcard-dev/daemon/internal/utils/logger" + "github.com/spf13/cobra" + "go.uber.org/zap" +) + +var runtimeSocket string +var k8sNamespace string +var k8sStorageClass string +var k8sPullImage string +var k8sRegistrySecret string +var hubbleRelayAddr string +var k8sKubeconfig string + +var ServeCommand = &cobra.Command{ + Use: "serve", + Short: "Run the multi-scroll runtime daemon", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runRuntimeDaemon() + }, +} + +func init() { + ServeCommand.Flags().StringVar(&runtimeSocket, "socket", utils.DefaultRuntimeSocketPath(), "Runtime daemon Unix socket path") + ServeCommand.Flags().StringVar(&runtimeStateDir, "state-dir", "", "Runtime state directory (default: ~/.druid/runtime)") + ServeCommand.Flags().StringVar(&runtimeBackend, "runtime", "docker", "Default runtime backend. 
Valid values: docker, kubernetes") + ServeCommand.Flags().StringVar(&k8sNamespace, "k8s-namespace", "", "Kubernetes namespace for runtime resources (default: service account namespace or DRUID_K8S_NAMESPACE)") + ServeCommand.Flags().StringVar(&k8sStorageClass, "k8s-storage-class", "", "Kubernetes storage class for runtime PVCs (default: DRUID_K8S_STORAGE_CLASS)") + ServeCommand.Flags().StringVar(&k8sPullImage, "k8s-pull-image", "", "Kubernetes image used for OCI pull materialization Jobs (default: DRUID_K8S_PULL_IMAGE)") + ServeCommand.Flags().StringVar(&k8sRegistrySecret, "k8s-registry-secret", "", "Kubernetes imagePullSecret used by runtime Jobs (default: DRUID_K8S_REGISTRY_SECRET)") + ServeCommand.Flags().StringVar(&k8sKubeconfig, "k8s-kubeconfig", "", "Kubernetes kubeconfig path for out-of-cluster runtime access (default: DRUID_K8S_KUBECONFIG, KUBECONFIG, or ~/.kube/config)") + ServeCommand.Flags().StringVar(&hubbleRelayAddr, "hubble-relay-addr", "", "Hubble Relay gRPC address for Kubernetes port traffic (default: DRUID_HUBBLE_RELAY_ADDR or hubble-relay.kube-system.svc.cluster.local:80)") +} + +func runRuntimeDaemon() error { + kubernetesConfig := runtimekubernetes.Config{ + Namespace: k8sNamespace, + StorageClass: k8sStorageClass, + PullImage: k8sPullImage, + RegistrySecret: k8sRegistrySecret, + HubbleRelayAddr: hubbleRelayAddr, + Kubeconfig: k8sKubeconfig, + } + store, err := appservices.NewRuntimeStoreForBackend(runtimeStateDir, runtimeBackend, kubernetesConfig) + if err != nil { + return err + } + manager := services.NewRuntimeScrollManager(store) + logManager := services.NewLogManager() + consoleService := services.NewConsoleManager(logManager) + supervisor := appservices.NewRuntimeSupervisor(store, manager, consoleService, runtimeBackend, runtimebackend.WithKubernetesConfig(kubernetesConfig)) + if err := supervisor.Start(); err != nil { + return err + } + + app := fiber.New(fiber.Config{DisableStartupMessage: true}) + runtimehandlers.RegisterRoutes(app, 
runtimehandlers.RouteHandlers{ + Server: runtimehandlers.NewRuntimeServer( + runtimehandlers.NewHealthHandler(), + runtimehandlers.NewScrollHandler(supervisor), + ), + Websocket: runtimehandlers.NewWebsocketHandler(consoleService), + }) + + return listenRuntimeDaemon(app, store.StateDir()) +} + +func listenRuntimeDaemon(app *fiber.App, stateDir string) error { + if runtimeSocket == "" { + runtimeSocket = utils.DefaultRuntimeSocketPath() + } + if err := os.MkdirAll(filepath.Dir(runtimeSocket), 0755); err != nil { + return err + } + _ = os.Remove(runtimeSocket) + listener, err := net.Listen("unix", runtimeSocket) + if err != nil { + return err + } + defer os.Remove(runtimeSocket) + logger.Log().Info("Starting runtime daemon", zap.String("socket", runtimeSocket), zap.String("stateDir", stateDir)) + return app.Listener(listener) +} diff --git a/cmd/update.go b/apps/druid/adapters/cli/update.go similarity index 66% rename from cmd/update.go rename to apps/druid/adapters/cli/update.go index 7697e0cb..8c682b42 100644 --- a/cmd/update.go +++ b/apps/druid/adapters/cli/update.go @@ -1,9 +1,10 @@ -package cmd +package cli import ( "encoding/json" "fmt" "os" + "path/filepath" "github.com/highcard-dev/daemon/internal/core/domain" "github.com/highcard-dev/daemon/internal/core/services/registry" @@ -11,22 +12,27 @@ import ( "github.com/highcard-dev/daemon/internal/utils/logger" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/spf13/cobra" + "github.com/spf13/viper" ) var updateIncludeData bool var UpdateCommand = &cobra.Command{ - Use: "update", + Use: "update [artifact] [dir]", Short: "Check for current version of the Scroll and update if necessary", Long: ``, - Args: cobra.MaximumNArgs(1), + Args: cobra.MaximumNArgs(2), RunE: func(cmd *cobra.Command, args []string) error { + scrollDir := currentWorkingDir() + if len(args) == 2 { + scrollDir = args[1] + } var artifact string if len(args) > 0 { artifact = args[0] } else { - scroll, err := domain.NewScroll(cwd) + 
scroll, err := domain.NewScroll(scrollDir) if err != nil { return err @@ -39,18 +45,18 @@ var UpdateCommand = &cobra.Command{ return fmt.Errorf("invalid artifact reference %q (expected repo:tag or repo@sha256:digest)", artifact) } if kind == utils.ArtifactRefKindDigest { - return fmt.Errorf("update only supports tag references (repo:tag). For digests, use `druid registry pull %s`", artifact) + return fmt.Errorf("update only supports tag references (repo:tag). For digests, use `druid-client pull %s`", artifact) } tag := ref //ctx := context.Background() logger.Log().Info("Checking for updates for " + artifact) - registryClient := registry.NewOciClient(LoadRegistryStore()) + registryClient := registry.NewOciClient(loadUpdateRegistryStore()) canUpdate := false - fileName := (cwd) + "/manifest.json" + fileName := filepath.Join(scrollDir, "manifest.json") b, err := os.ReadFile(fileName) if err != nil { @@ -72,7 +78,7 @@ var UpdateCommand = &cobra.Command{ if canUpdate { logger.Log().Info("Updated scroll files") - err = registryClient.PullSelective(cwd, artifact, updateIncludeData, nil) + err = registryClient.PullSelective(scrollDir, artifact, updateIncludeData, nil) if err != nil { return fmt.Errorf("error pulling scroll files: %v", err) } @@ -88,3 +94,21 @@ var UpdateCommand = &cobra.Command{ func init() { UpdateCommand.Flags().BoolVar(&updateIncludeData, "include-data", false, "Also pull scroll data layers") } + +func loadUpdateRegistryStore() *registry.CredentialStore { + var registries []domain.RegistryCredential + viper.UnmarshalKey("registries", ®istries) + if len(registries) == 0 { + host := viper.GetString("registry.host") + user := viper.GetString("registry.user") + password := viper.GetString("registry.password") + if host != "" { + registries = append(registries, domain.RegistryCredential{ + Host: host, + Username: user, + Password: password, + }) + } + } + return registry.NewCredentialStore(registries) +} diff --git a/cmd/scroll_validate.go 
b/apps/druid/adapters/cli/validate.go similarity index 81% rename from cmd/scroll_validate.go rename to apps/druid/adapters/cli/validate.go index 91bec8cd..ca5651ed 100644 --- a/cmd/scroll_validate.go +++ b/apps/druid/adapters/cli/validate.go @@ -1,4 +1,4 @@ -package cmd +package cli import ( "fmt" @@ -9,13 +9,13 @@ import ( var strict bool -var ScrollValidateCmd = &cobra.Command{ +var ValidateCmd = &cobra.Command{ Use: "validate", Short: "Validates the scroll file", Long: `This command validates the scroll file to ensure it meets the required criteria.`, Args: cobra.MaximumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - scrollDir := cwd + scrollDir := currentWorkingDir() if len(args) > 0 { scrollDir = args[0] } @@ -36,5 +36,5 @@ var ScrollValidateCmd = &cobra.Command{ } func init() { - ScrollValidateCmd.Flags().BoolVar(&strict, "strict", false, "Enable strict validation mode") + ValidateCmd.Flags().BoolVar(&strict, "strict", false, "Enable strict validation mode") } diff --git a/cmd/version.go b/apps/druid/adapters/cli/version.go similarity index 96% rename from cmd/version.go rename to apps/druid/adapters/cli/version.go index 00572b0b..9a0286b5 100644 --- a/cmd/version.go +++ b/apps/druid/adapters/cli/version.go @@ -1,4 +1,4 @@ -package cmd +package cli import ( constants "github.com/highcard-dev/daemon/internal" diff --git a/apps/druid/adapters/http/handlers/health_handler.go b/apps/druid/adapters/http/handlers/health_handler.go new file mode 100644 index 00000000..0079f5e5 --- /dev/null +++ b/apps/druid/adapters/http/handlers/health_handler.go @@ -0,0 +1,16 @@ +package handlers + +import ( + "github.com/gofiber/fiber/v2" + "github.com/highcard-dev/daemon/internal/api" +) + +type HealthHandler struct{} + +func NewHealthHandler() *HealthHandler { + return &HealthHandler{} +} + +func (h *HealthHandler) GetHealthAuth(c *fiber.Ctx) error { + return c.JSON(api.HealthResponse{Mode: "ok"}) +} diff --git a/apps/druid/adapters/http/handlers/routes.go 
b/apps/druid/adapters/http/handlers/routes.go new file mode 100644 index 00000000..ba9373b6 --- /dev/null +++ b/apps/druid/adapters/http/handlers/routes.go @@ -0,0 +1,27 @@ +package handlers + +import ( + "github.com/gofiber/contrib/websocket" + "github.com/gofiber/fiber/v2" + "github.com/highcard-dev/daemon/internal/api" +) + +type RouteHandlers struct { + Server *RuntimeServer + Websocket *WebsocketHandler +} + +type RuntimeServer struct { + *HealthHandler + *ScrollHandler +} + +func NewRuntimeServer(health *HealthHandler, scrolls *ScrollHandler) *RuntimeServer { + return &RuntimeServer{HealthHandler: health, ScrollHandler: scrolls} +} + +func RegisterRoutes(app *fiber.App, handlers RouteHandlers) { + api.RegisterHandlersWithOptions(app, handlers.Server, api.FiberServerOptions{}) + app.Get("/health", handlers.Server.GetHealthAuth) + app.Get("/ws/v1/scrolls/:id/consoles/:console", websocket.New(handlers.Websocket.AttachConsole)) +} diff --git a/apps/druid/adapters/http/handlers/scroll_handler.go b/apps/druid/adapters/http/handlers/scroll_handler.go new file mode 100644 index 00000000..360ba717 --- /dev/null +++ b/apps/druid/adapters/http/handlers/scroll_handler.go @@ -0,0 +1,115 @@ +package handlers + +import ( + "errors" + + "github.com/gofiber/fiber/v2" + appservices "github.com/highcard-dev/daemon/apps/druid/core/services" + "github.com/highcard-dev/daemon/internal/api" + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/services" +) + +type ScrollHandler struct { + supervisor *appservices.RuntimeSupervisor +} + +func NewScrollHandler(supervisor *appservices.RuntimeSupervisor) *ScrollHandler { + return &ScrollHandler{ + supervisor: supervisor, + } +} + +func (h *ScrollHandler) ListScrolls(c *fiber.Ctx) error { + scrolls, err := h.supervisor.List() + if err != nil { + return err + } + return c.JSON(scrolls) +} + +func (h *ScrollHandler) CreateScroll(c *fiber.Ctx) error { + var request api.CreateScrollRequest + 
if err := c.BodyParser(&request); err != nil { + return fiber.NewError(fiber.StatusBadRequest, err.Error()) + } + name := "" + if request.Name != nil && *request.Name != "" { + name = *request.Name + } else if request.Id != nil && *request.Id != "" { + name = *request.Id + } + scrollRoot := "" + if request.ScrollRoot != nil { + scrollRoot = *request.ScrollRoot + } + dataRoot := "" + if request.DataRoot != nil { + dataRoot = *request.DataRoot + } + runtimeScroll, err := h.supervisor.Create(request.Artifact, name, scrollRoot, dataRoot) + if err != nil { + if errors.Is(err, services.ErrScrollAlreadyExists) { + return fiber.NewError(fiber.StatusConflict, err.Error()) + } + if errors.Is(err, appservices.ErrRuntimeMaterializationUnsupported) { + return fiber.NewError(fiber.StatusNotImplemented, err.Error()) + } + return err + } + return c.Status(fiber.StatusCreated).JSON(runtimeScroll) +} + +func (h *ScrollHandler) GetScroll(c *fiber.Ctx, id string) error { + runtimeScroll, err := h.getScroll(id) + if err != nil { + return err + } + return c.JSON(runtimeScroll) +} + +func (h *ScrollHandler) DeleteScroll(c *fiber.Ctx, id string) error { + runtimeScroll, err := h.getScroll(id) + if err != nil { + return err + } + if err := h.supervisor.Delete(id); err != nil { + return err + } + return c.JSON(api.DeletedScroll{ + Id: runtimeScroll.ID, + Status: "deleted", + }) +} + +func (h *ScrollHandler) RunScrollCommand(c *fiber.Ctx, id string, command string) error { + runtimeScroll, err := h.getScroll(id) + if err != nil { + return err + } + updated, err := h.supervisor.Run(runtimeScroll.ID, command) + if err != nil { + return err + } + return c.JSON(updated) +} + +func (h *ScrollHandler) GetScrollPorts(c *fiber.Ctx, id string) error { + runtimeScroll, err := h.getScroll(id) + if err != nil { + return err + } + statuses, err := h.supervisor.Ports(runtimeScroll.ID) + if err != nil { + return err + } + return c.JSON(statuses) +} + +func (h *ScrollHandler) getScroll(id string) 
(*domain.RuntimeScroll, error) { + runtimeScroll, err := h.supervisor.Get(id) + if errors.Is(err, services.ErrScrollNotFound) { + return nil, fiber.NewError(fiber.StatusNotFound, err.Error()) + } + return runtimeScroll, err +} diff --git a/apps/druid/adapters/http/handlers/websocket_handler.go b/apps/druid/adapters/http/handlers/websocket_handler.go new file mode 100644 index 00000000..6dbda07b --- /dev/null +++ b/apps/druid/adapters/http/handlers/websocket_handler.go @@ -0,0 +1,69 @@ +package handlers + +import ( + "time" + + "github.com/gofiber/contrib/websocket" + "github.com/highcard-dev/daemon/internal/core/services" + "github.com/highcard-dev/daemon/internal/utils/logger" + "go.uber.org/zap" +) + +type WebsocketHandler struct { + consoleService *services.ConsoleManager +} + +func NewWebsocketHandler(consoleService *services.ConsoleManager) *WebsocketHandler { + return &WebsocketHandler{consoleService: consoleService} +} + +func (h *WebsocketHandler) AttachConsole(c *websocket.Conn) { + consoleID := c.Params("console") + defer c.Close() + + console := h.consoleService.GetConsole(consoleID) + if console == nil { + logger.Log().Warn("Console not found", zap.String("console", consoleID)) + return + } + + subscription := console.Channel.Subscribe() + defer console.Channel.Unsubscribe(subscription) + + done := make(chan struct{}) + go func() { + defer close(done) + for { + _, data, err := c.ReadMessage() + if err != nil { + return + } + if console.WriteInput != nil { + if err := console.WriteInput(string(data)); err != nil { + logger.Log().Debug("Failed to write console input", zap.Error(err)) + return + } + } + } + }() + + pingTicker := time.NewTicker(30 * time.Second) + defer pingTicker.Stop() + for { + select { + case <-done: + return + case data, ok := <-subscription: + if !ok || data == nil { + return + } + if err := c.WriteMessage(websocket.TextMessage, *data); err != nil { + return + } + case <-pingTicker.C: + if err := c.WriteMessage(websocket.PingMessage, 
nil); err != nil { + return + } + } + } +} diff --git a/apps/druid/core/services/runtime_controller.go b/apps/druid/core/services/runtime_controller.go new file mode 100644 index 00000000..a705afeb --- /dev/null +++ b/apps/druid/core/services/runtime_controller.go @@ -0,0 +1,539 @@ +package services + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/ports" + coreservices "github.com/highcard-dev/daemon/internal/core/services" + runtimebackend "github.com/highcard-dev/daemon/internal/runtime" + runtimekubernetes "github.com/highcard-dev/daemon/internal/runtime/kubernetes" + "github.com/highcard-dev/daemon/internal/utils" + "github.com/highcard-dev/daemon/internal/utils/logger" + "go.uber.org/zap" +) + +var ErrRuntimeMaterializationUnsupported = errors.New("runtime backend does not support daemon materialization") + +var newKubernetesRuntimeStore = func(config runtimekubernetes.Config) (coreservices.RuntimeScrollStore, error) { + return runtimekubernetes.NewConfigMapStateStore(config) +} + +func NewRuntimeStore(stateDir string) (coreservices.RuntimeScrollStore, error) { + if stateDir == "" { + defaultStateDir, err := utils.DefaultRuntimeStateDir() + if err != nil { + return nil, err + } + stateDir = defaultStateDir + } + return coreservices.NewRuntimeStateStore(stateDir), nil +} + +func NewRuntimeStoreForBackend(stateDir string, runtimeBackend string, kubernetesConfig runtimekubernetes.Config) (coreservices.RuntimeScrollStore, error) { + if runtimeBackend == "kubernetes" { + return newKubernetesRuntimeStore(kubernetesConfig) + } + return NewRuntimeStore(stateDir) +} + +func LoadRuntimeScroll(stateDir string, id string) (coreservices.RuntimeScrollStore, *domain.RuntimeScroll, error) { + store, err := NewRuntimeStore(stateDir) + if err != nil { + return nil, nil, err + } + runtimeScroll, err := 
store.GetScroll(id) + if err != nil { + if errors.Is(err, coreservices.ErrScrollNotFound) { + return nil, nil, fmt.Errorf("runtime scroll %s not found", id) + } + return nil, nil, err + } + return store, runtimeScroll, nil +} + +type RuntimeSupervisor struct { + store coreservices.RuntimeScrollStore + manager *coreservices.RuntimeScrollManager + consoleService *coreservices.ConsoleManager + runtimeBackend string + runtimeOptions runtimebackend.Options + + mu sync.Mutex + sessions map[string]*RuntimeSession +} + +func NewRuntimeSupervisor( + store coreservices.RuntimeScrollStore, + manager *coreservices.RuntimeScrollManager, + consoleService *coreservices.ConsoleManager, + runtimeBackend string, + options ...runtimebackend.Option, +) *RuntimeSupervisor { + runtimeOptions := runtimebackend.Options{} + for _, option := range options { + option(&runtimeOptions) + } + return &RuntimeSupervisor{ + store: store, + manager: manager, + consoleService: consoleService, + runtimeBackend: runtimeBackend, + runtimeOptions: runtimeOptions, + sessions: map[string]*RuntimeSession{}, + } +} + +func (s *RuntimeSupervisor) Start() error { + scrolls, err := s.store.ListScrolls() + if err != nil { + return err + } + for _, runtimeScroll := range scrolls { + if runtimeScroll.Status == domain.RuntimeScrollStatusDeleted { + continue + } + session, err := s.startSession(runtimeScroll) + if err != nil { + s.markScrollError(runtimeScroll, err) + continue + } + if err := session.Hydrate(); err != nil { + s.markScrollError(runtimeScroll, err) + continue + } + } + return nil +} + +func (s *RuntimeSupervisor) Create(artifact string, name string, scrollRoot string, dataRoot string) (*domain.RuntimeScroll, error) { + runtimeService, err := runtimebackend.NewBackend(s.runtimeBackend, s.consoleService, runtimebackend.WithKubernetesConfig(s.runtimeOptions.Kubernetes)) + if err != nil { + return nil, err + } + var scrollYAML []byte + if scrollRoot == "" && dataRoot == "" { + materializer, ok := 
runtimeService.(ports.RuntimeMaterializerInterface) + if !ok { + return nil, ErrRuntimeMaterializationUnsupported + } + materialized, err := materializer.MaterializeScroll(context.Background(), artifact, name) + if err != nil { + return nil, err + } + if materialized.Artifact != "" { + artifact = materialized.Artifact + } + scrollRoot = materialized.ScrollRoot + dataRoot = materialized.DataRoot + scrollYAML = materialized.ScrollYAML + } else { + scrollYAML, err = runtimeService.ReadScrollFile(scrollRoot) + if err != nil { + return nil, err + } + } + runtimeScroll, err := s.manager.Create(artifact, name, scrollRoot, dataRoot, scrollYAML) + if err != nil { + return nil, err + } + session, err := s.startSession(runtimeScroll) + if err != nil { + runtimeScroll.Status = domain.RuntimeScrollStatusError + _ = s.store.UpdateScroll(runtimeScroll) + return nil, err + } + if err := session.AutoStartServe(); err != nil { + runtimeScroll.Status = domain.RuntimeScrollStatusError + _ = s.store.UpdateScroll(runtimeScroll) + return nil, err + } + return runtimeScroll, nil +} + +func (s *RuntimeSupervisor) List() ([]*domain.RuntimeScroll, error) { + return s.store.ListScrolls() +} + +func (s *RuntimeSupervisor) Get(id string) (*domain.RuntimeScroll, error) { + return s.store.GetScroll(id) +} + +func (s *RuntimeSupervisor) Delete(id string) error { + s.mu.Lock() + session := s.sessions[id] + delete(s.sessions, id) + s.mu.Unlock() + if session != nil { + session.Shutdown() + } + return s.store.DeleteScroll(id) +} + +func (s *RuntimeSupervisor) Run(id string, command string) (*domain.RuntimeScroll, error) { + session, err := s.sessionFor(id) + if err != nil { + return nil, err + } + return session.Run(command) +} + +func (s *RuntimeSupervisor) Ports(id string) ([]domain.RuntimePortStatus, error) { + session, err := s.sessionFor(id) + if err != nil { + return nil, err + } + return session.Ports() +} + +func (s *RuntimeSupervisor) sessionFor(id string) (*RuntimeSession, error) { + 
s.mu.Lock() + session := s.sessions[id] + s.mu.Unlock() + if session != nil { + return session, nil + } + runtimeScroll, err := s.store.GetScroll(id) + if err != nil { + return nil, err + } + return s.startSession(runtimeScroll) +} + +func (s *RuntimeSupervisor) startSession(runtimeScroll *domain.RuntimeScroll) (*RuntimeSession, error) { + s.mu.Lock() + if session := s.sessions[runtimeScroll.ID]; session != nil { + s.mu.Unlock() + return session, nil + } + s.mu.Unlock() + + session, err := NewRuntimeSession(s.store, runtimeScroll, s.consoleService, s.runtimeBackend, runtimebackend.WithKubernetesConfig(s.runtimeOptions.Kubernetes)) + if err != nil { + return nil, err + } + session.Start() + + s.mu.Lock() + if existing := s.sessions[runtimeScroll.ID]; existing != nil { + s.mu.Unlock() + session.Shutdown() + return existing, nil + } + s.sessions[runtimeScroll.ID] = session + s.mu.Unlock() + return session, nil +} + +func (s *RuntimeSupervisor) markScrollError(runtimeScroll *domain.RuntimeScroll, err error) { + logger.Log().Error("failed to restore runtime scroll", zap.String("scroll", runtimeScroll.ID), zap.Error(err)) + runtimeScroll.Status = domain.RuntimeScrollStatusError + if runtimeScroll.Commands == nil { + runtimeScroll.Commands = map[string]domain.LockStatus{} + } + _ = s.store.UpdateScroll(runtimeScroll) +} + +type RuntimeSession struct { + store coreservices.RuntimeScrollStore + runtimeScroll *domain.RuntimeScroll + scrollService *coreservices.ScrollService + queueManager *coreservices.QueueManager + runtimeBackend ports.RuntimeBackendInterface + + mu sync.Mutex + started bool +} + +func NewRuntimeSession( + store coreservices.RuntimeScrollStore, + runtimeScroll *domain.RuntimeScroll, + consoleService *coreservices.ConsoleManager, + runtimeBackend string, + options ...runtimebackend.Option, +) (*RuntimeSession, error) { + runtimeService, err := runtimebackend.NewBackend(runtimeBackend, consoleService, options...) 
+ if err != nil { + return nil, err + } + if runtimeScroll.DataRoot == "" { + return nil, fmt.Errorf("runtime scroll %s has no data root", runtimeScroll.ID) + } + scrollYAML := []byte(runtimeScroll.ScrollYAML) + if len(scrollYAML) == 0 { + scrollYAML, err = runtimeService.ReadScrollFile(runtimeScroll.ScrollRoot) + if err != nil { + return nil, err + } + runtimeScroll.ScrollYAML = string(scrollYAML) + if err := store.UpdateScroll(runtimeScroll); err != nil { + return nil, err + } + } + scrollService, err := coreservices.NewCachedScrollService(runtimeScroll.ScrollRoot, scrollYAML) + if err != nil { + return nil, err + } + processLauncher, err := coreservices.NewProcedureLauncher(scrollService, runtimeService, runtimeScroll.DataRoot) + if err != nil { + return nil, err + } + queueManager := coreservices.NewQueueManager(scrollService, processLauncher) + session := &RuntimeSession{ + store: store, + runtimeScroll: runtimeScroll, + scrollService: scrollService, + queueManager: queueManager, + runtimeBackend: runtimeService, + } + queueManager.SetStatusObserver(session.persistCommandStatus) + return session, nil +} + +func (s *RuntimeSession) Start() { + s.mu.Lock() + defer s.mu.Unlock() + if s.started { + return + } + s.started = true + go s.queueManager.Work() +} + +func (s *RuntimeSession) Hydrate() error { + s.mu.Lock() + statuses := copyCommandStatuses(s.runtimeScroll.Commands) + s.mu.Unlock() + if len(statuses) > 0 { + if err := s.queueManager.HydrateCommandStatuses(statuses); err != nil { + return err + } + } + if err := s.AutoStartServe(); err != nil { + return err + } + s.mu.Lock() + s.runtimeScroll.Status = deriveRuntimeScrollStatus(s.runtimeScroll.Commands, s.scrollService.GetFile().Commands) + err := s.store.UpdateScroll(s.runtimeScroll) + s.mu.Unlock() + return err +} + +func (s *RuntimeSession) AutoStartServe() error { + serveCommand := s.scrollService.GetFile().Serve + if serveCommand == "" { + return nil + } + if err := WriteRuntimeConfig(s.runtimeScroll, 
s.scrollService.GetFile(), s.runtimeBackend.Name()); err != nil { + return err + } + if err := s.queueManager.AddForcedItem(serveCommand); err != nil && !errors.Is(err, coreservices.ErrAlreadyInQueue) { + return err + } + return nil +} + +func (s *RuntimeSession) Run(command string) (*domain.RuntimeScroll, error) { + if err := WriteRuntimeConfig(s.runtimeScroll, s.scrollService.GetFile(), s.runtimeBackend.Name()); err != nil { + return nil, err + } + s.refreshCommandState() + targetCommand, err := s.scrollService.GetCommand(command) + if err != nil { + s.markError() + return nil, err + } + longRunning := targetCommand.Run == domain.RunModeRestart || targetCommand.Run == domain.RunModePersistent + s.rememberDoneDependencies(targetCommand, map[string]bool{}) + + if err := s.queueManager.AddTempItem(command); err != nil { + s.markError() + return nil, err + } + if !longRunning { + s.queueManager.WaitUntilEmpty() + } + + s.mu.Lock() + s.runtimeScroll.Status = deriveRuntimeScrollStatus(s.runtimeScroll.Commands, s.scrollService.GetFile().Commands) + err = s.store.UpdateScroll(s.runtimeScroll) + id := s.runtimeScroll.ID + s.mu.Unlock() + if err != nil { + return nil, err + } + return s.store.GetScroll(id) +} + +func (s *RuntimeSession) refreshCommandState() { + fresh, err := s.store.GetScroll(s.runtimeScroll.ID) + if err != nil { + return + } + s.mu.Lock() + s.runtimeScroll.Commands = copyCommandStatuses(fresh.Commands) + s.runtimeScroll.Status = fresh.Status + s.mu.Unlock() +} + +func (s *RuntimeSession) rememberDoneDependencies(command *domain.CommandInstructionSet, seen map[string]bool) { + if command == nil { + return + } + for _, dependency := range command.Needs { + if seen[dependency] { + continue + } + seen[dependency] = true + status, ok := s.runtimeScroll.Commands[dependency] + if ok && status.Status == domain.ScrollLockStatusDone { + s.queueManager.RememberDoneItem(dependency) + } + dependencyCommand, err := s.scrollService.GetCommand(dependency) + if err == 
nil { + s.rememberDoneDependencies(dependencyCommand, seen) + } + } +} + +func (s *RuntimeSession) Ports() ([]domain.RuntimePortStatus, error) { + s.mu.Lock() + runtimeScroll := *s.runtimeScroll + s.mu.Unlock() + return s.runtimeBackend.ExpectedPorts(runtimeScroll.DataRoot, s.scrollService.GetFile().Commands, s.scrollService.GetFile().Ports) +} + +func (s *RuntimeSession) Shutdown() { + s.queueManager.Shutdown() +} + +func (s *RuntimeSession) persistCommandStatus(command string, status domain.ScrollLockStatus, exitCode *int) { + s.mu.Lock() + defer s.mu.Unlock() + if s.runtimeScroll.Commands == nil { + s.runtimeScroll.Commands = map[string]domain.LockStatus{} + } + s.runtimeScroll.Commands[command] = domain.LockStatus{ + Status: status, + ExitCode: exitCode, + LastStatusChange: time.Now().Unix(), + } + s.runtimeScroll.Status = deriveRuntimeScrollStatus(s.runtimeScroll.Commands, s.scrollService.GetFile().Commands) + if err := s.store.UpdateScroll(s.runtimeScroll); err != nil { + logger.Log().Error("failed to persist command status", zap.String("scroll", s.runtimeScroll.ID), zap.String("command", command), zap.Error(err)) + } +} + +func (s *RuntimeSession) markError() { + s.mu.Lock() + defer s.mu.Unlock() + s.runtimeScroll.Status = domain.RuntimeScrollStatusError + _ = s.store.UpdateScroll(s.runtimeScroll) +} + +func WriteRuntimeConfig(runtimeScroll *domain.RuntimeScroll, scroll *domain.File, runtimeBackend string) error { + if strings.HasPrefix(runtimeScroll.DataRoot, "k8s://") { + return nil + } + configPath := filepath.Join(runtimeScroll.DataRoot, domain.RuntimeDataDir, domain.RuntimeConfigDir, domain.RuntimeConfigFile) + if err := os.MkdirAll(filepath.Dir(configPath), 0755); err != nil { + return err + } + config := domain.RuntimeConfig{ + SchemaVersion: "druid.runtime/v1", + Scroll: domain.RuntimeConfigScroll{ + ID: runtimeScroll.ID, + Name: runtimeScroll.ScrollName, + Artifact: runtimeScroll.Artifact, + }, + Paths: domain.RuntimeConfigPaths{ + Data: ".", + 
RuntimeConfig: filepath.ToSlash(filepath.Join(domain.RuntimeConfigDir, domain.RuntimeConfigFile)), + }, + Ports: scroll.Ports, + ExpectedPorts: runtimeExpectedPorts(scroll), + Runtime: domain.RuntimeConfigRuntime{ + Backend: runtimeBackend, + GeneratedAt: time.Now().UTC().Format(time.RFC3339Nano), + }, + } + data, err := json.MarshalIndent(config, "", " ") + if err != nil { + return err + } + return os.WriteFile(configPath, data, 0644) +} + +func runtimeExpectedPorts(scroll *domain.File) []domain.RuntimeExpectedPort { + portsByName := map[string]domain.Port{} + for _, port := range scroll.Ports { + portsByName[port.Name] = port + } + ports := []domain.RuntimeExpectedPort{} + for commandName, command := range scroll.Commands { + if command == nil { + continue + } + for idx, procedure := range command.Procedures { + if procedure == nil { + continue + } + procedureName := fmt.Sprintf("%s.%d", commandName, idx) + if procedure.Id != nil { + procedureName = *procedure.Id + } + for _, expectedPort := range procedure.ExpectedPorts { + port := portsByName[expectedPort.Name] + ports = append(ports, domain.RuntimeExpectedPort{ + Name: expectedPort.Name, + Procedure: procedureName, + Port: port.Port, + Protocol: port.Protocol, + KeepAliveTraffic: expectedPort.KeepAliveTraffic, + }) + } + } + } + return ports +} + +func deriveRuntimeScrollStatus(statuses map[string]domain.LockStatus, commands map[string]*domain.CommandInstructionSet) domain.RuntimeScrollStatus { + if len(statuses) == 0 { + return domain.RuntimeScrollStatusCreated + } + hasActive := false + hasPersistentDone := false + for commandName, status := range statuses { + if status.Status == domain.ScrollLockStatusError { + return domain.RuntimeScrollStatusError + } + if status.Status == domain.ScrollLockStatusRunning || status.Status == domain.ScrollLockStatusWaiting { + hasActive = true + } + if status.Status == domain.ScrollLockStatusDone { + if command := commands[commandName]; command != nil && command.Run == 
domain.RunModePersistent { + hasPersistentDone = true + } + } + } + if hasActive || hasPersistentDone { + return domain.RuntimeScrollStatusRunning + } + return domain.RuntimeScrollStatusStopped +} + +func copyCommandStatuses(statuses map[string]domain.LockStatus) map[string]domain.LockStatus { + copied := map[string]domain.LockStatus{} + for command, status := range statuses { + copied[command] = status + } + return copied +} diff --git a/apps/druid/core/services/runtime_controller_test.go b/apps/druid/core/services/runtime_controller_test.go new file mode 100644 index 00000000..e3400158 --- /dev/null +++ b/apps/druid/core/services/runtime_controller_test.go @@ -0,0 +1,284 @@ +package services + +import ( + "os" + "path/filepath" + "testing" + + "github.com/highcard-dev/daemon/internal/core/domain" + coreservices "github.com/highcard-dev/daemon/internal/core/services" + runtimekubernetes "github.com/highcard-dev/daemon/internal/runtime/kubernetes" +) + +func TestRuntimeSessionUsesCachedScrollYAML(t *testing.T) { + scrollRoot := t.TempDir() + dataRoot := filepath.Join(t.TempDir(), "data") + runtimeScroll := &domain.RuntimeScroll{ + ID: "cached", + Artifact: "local", + ScrollRoot: scrollRoot, + DataRoot: dataRoot, + ScrollName: "cached", + ScrollYAML: `name: cached +desc: Cached scroll +version: 0.1.0 +app_version: "1.0" +serve: start +commands: + start: + procedures: + - image: alpine:3.20 + command: ["true"] +`, + } + + session, err := NewRuntimeSession(coreservices.NewRuntimeStateStore(t.TempDir()), runtimeScroll, coreservices.NewConsoleManager(coreservices.NewLogManager()), "docker") + if err != nil { + t.Fatal(err) + } + if got := session.scrollService.GetFile().Name; got != "cached" { + t.Fatalf("scroll name = %q, want cached", got) + } +} + +func TestRuntimeSessionHydrateAutoStartsServeWithoutPreviousStatus(t *testing.T) { + session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{}, cachedScrollYAML("start")) + + if err := session.Hydrate(); err != 
nil { + t.Fatal(err) + } + + assertQueued(t, session, "start") +} + +func TestRuntimeSessionHydrateForceRequeuesDoneServe(t *testing.T) { + session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{ + "start": {Status: domain.ScrollLockStatusDone}, + }, cachedScrollYAML("start")) + + if err := session.Hydrate(); err != nil { + t.Fatal(err) + } + + assertQueued(t, session, "start") +} + +func TestRuntimeSessionHydrateRequeuesErrorServe(t *testing.T) { + session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{ + "start": {Status: domain.ScrollLockStatusError}, + }, cachedScrollYAML("start")) + + if err := session.Hydrate(); err != nil { + t.Fatal(err) + } + + assertQueued(t, session, "start") +} + +func TestRuntimeSessionHydrateDoesNotDuplicateActiveServe(t *testing.T) { + session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{ + "start": {Status: domain.ScrollLockStatusRunning}, + }, cachedScrollYAML("start")) + + if err := session.Hydrate(); err != nil { + t.Fatal(err) + } + + queue := session.queueManager.GetQueue() + if len(queue) != 1 { + t.Fatalf("queue len = %d, want 1: %#v", len(queue), queue) + } + if queue["start"] != domain.ScrollLockStatusWaiting { + t.Fatalf("start = %s, want waiting", queue["start"]) + } +} + +func TestRuntimeSessionHydrateSkipsMissingServe(t *testing.T) { + session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{}, cachedScrollYAML("")) + + if err := session.Hydrate(); err != nil { + t.Fatal(err) + } + + if queue := session.queueManager.GetQueue(); len(queue) != 0 { + t.Fatalf("queue = %#v, want empty", queue) + } +} + +func TestRuntimeSessionAutoStartsServeOnCreatePath(t *testing.T) { + session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{}, cachedScrollYAML("start")) + + if err := session.AutoStartServe(); err != nil { + t.Fatal(err) + } + + assertQueued(t, session, "start") +} + +func TestDeriveRuntimeScrollStatusTreatsDonePersistentAsRunning(t *testing.T) { + status := 
deriveRuntimeScrollStatus(map[string]domain.LockStatus{ + "start": {Status: domain.ScrollLockStatusDone}, + }, map[string]*domain.CommandInstructionSet{ + "start": {Run: domain.RunModePersistent}, + }) + + if status != domain.RuntimeScrollStatusRunning { + t.Fatalf("status = %s, want running", status) + } +} + +func TestDeriveRuntimeScrollStatusTreatsDoneFiniteAsStopped(t *testing.T) { + status := deriveRuntimeScrollStatus(map[string]domain.LockStatus{ + "report": {Status: domain.ScrollLockStatusDone}, + }, map[string]*domain.CommandInstructionSet{ + "report": {Run: domain.RunModeAlways}, + }) + + if status != domain.RuntimeScrollStatusStopped { + t.Fatalf("status = %s, want stopped", status) + } +} + +func TestNewRuntimeStoreForBackendUsesKubernetesStoreWithoutStateDB(t *testing.T) { + stateDir := t.TempDir() + called := false + previous := newKubernetesRuntimeStore + newKubernetesRuntimeStore = func(config runtimekubernetes.Config) (coreservices.RuntimeScrollStore, error) { + called = true + if config.Namespace != "druid" { + t.Fatalf("namespace = %s, want druid", config.Namespace) + } + return fakeRuntimeScrollStore{state: "kubernetes:druid/configmaps"}, nil + } + t.Cleanup(func() { + newKubernetesRuntimeStore = previous + }) + + store, err := NewRuntimeStoreForBackend(stateDir, "kubernetes", runtimekubernetes.Config{Namespace: "druid"}) + if err != nil { + t.Fatal(err) + } + if !called { + t.Fatal("kubernetes store factory was not called") + } + if store.StateDir() != "kubernetes:druid/configmaps" { + t.Fatalf("StateDir = %s, want kubernetes:druid/configmaps", store.StateDir()) + } + if _, err := os.Stat(filepath.Join(stateDir, "state.db")); !os.IsNotExist(err) { + t.Fatalf("state.db stat error = %v, want not exist", err) + } +} + +func TestWriteRuntimeConfigSkipsKubernetesRefs(t *testing.T) { + workingDir := t.TempDir() + previous, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + if err := os.Chdir(workingDir); err != nil { + t.Fatal(err) + } + 
t.Cleanup(func() { + _ = os.Chdir(previous) + }) + + err = WriteRuntimeConfig(&domain.RuntimeScroll{ + ID: "container-lab", + Artifact: "artifact", + DataRoot: "k8s://druid/druid-container-lab-data", + ScrollName: "container-lab", + }, &domain.File{}, "kubernetes") + if err != nil { + t.Fatal(err) + } + if _, err := os.Stat(filepath.Join(workingDir, "k8s:")); !os.IsNotExist(err) { + t.Fatalf("k8s: stat error = %v, want not exist", err) + } +} + +func newRuntimeSessionForTest(t *testing.T, commands map[string]domain.LockStatus, scrollYAML string) *RuntimeSession { + t.Helper() + scrollRoot := t.TempDir() + dataRoot := filepath.Join(t.TempDir(), "data") + store := coreservices.NewRuntimeStateStore(t.TempDir()) + runtimeScroll := &domain.RuntimeScroll{ + ID: "cached", + Artifact: "local", + ScrollRoot: scrollRoot, + DataRoot: dataRoot, + ScrollName: "cached", + ScrollYAML: scrollYAML, + Commands: commands, + } + if err := store.CreateScroll(runtimeScroll); err != nil { + t.Fatal(err) + } + session, err := NewRuntimeSession(store, runtimeScroll, coreservices.NewConsoleManager(coreservices.NewLogManager()), "docker") + if err != nil { + t.Fatal(err) + } + return session +} + +type fakeRuntimeScrollStore struct { + state string +} + +func (f fakeRuntimeScrollStore) StateDir() string { + return f.state +} + +func (f fakeRuntimeScrollStore) ScrollRoot(id string) string { + return "" +} + +func (f fakeRuntimeScrollStore) DataRoot(id string) string { + return "" +} + +func (f fakeRuntimeScrollStore) CreateScroll(scroll *domain.RuntimeScroll) error { + return nil +} + +func (f fakeRuntimeScrollStore) ListScrolls() ([]*domain.RuntimeScroll, error) { + return nil, nil +} + +func (f fakeRuntimeScrollStore) GetScroll(id string) (*domain.RuntimeScroll, error) { + return nil, coreservices.ErrScrollNotFound +} + +func (f fakeRuntimeScrollStore) UpdateScroll(scroll *domain.RuntimeScroll) error { + return nil +} + +func (f fakeRuntimeScrollStore) DeleteScroll(id string) error { + 
return nil +} + +func cachedScrollYAML(serve string) string { + yaml := `name: cached +desc: Cached scroll +version: 0.1.0 +app_version: "1.0" +` + if serve != "" { + yaml += "serve: " + serve + "\n" + } + yaml += `commands: + start: + run: once + procedures: + - image: alpine:3.20 + command: ["true"] +` + return yaml +} + +func assertQueued(t *testing.T, session *RuntimeSession, command string) { + t.Helper() + queue := session.queueManager.GetQueue() + if queue[command] != domain.ScrollLockStatusWaiting { + t.Fatalf("%s = %s, want waiting; queue=%#v", command, queue[command], queue) + } +} diff --git a/main.go b/apps/druid/main.go similarity index 54% rename from main.go rename to apps/druid/main.go index 31a4a83d..3902c5ff 100644 --- a/main.go +++ b/apps/druid/main.go @@ -3,17 +3,13 @@ package main import ( "os" - "github.com/highcard-dev/daemon/cmd" + "github.com/highcard-dev/daemon/apps/druid/adapters/cli" "github.com/highcard-dev/daemon/internal/utils/logger" ) func main() { logger.Log(logger.WithStructuredLogging()) - //druid start - //druid run [update] - if err := cmd.RootCmd.Execute(); err != nil { - // log it, then + if err := cli.RootCmd.Execute(); err != nil { os.Exit(23) } - } diff --git a/cmd/coldstarter.go b/cmd/coldstarter.go deleted file mode 100644 index f26960d8..00000000 --- a/cmd/coldstarter.go +++ /dev/null @@ -1,44 +0,0 @@ -package cmd - -import ( - "context" - "fmt" - - "github.com/highcard-dev/daemon/internal/core/services" - "github.com/highcard-dev/daemon/internal/utils/logger" - "github.com/spf13/cobra" - "go.uber.org/zap" -) - -var ColdstarterCmd = &cobra.Command{ - Use: "coldstarter", - Short: "Starts the coldstarter only", - Long: "Starts the coldstarter only and waits to finish", - RunE: func(cmd *cobra.Command, args []string) error { - - scrollService, err := services.NewScrollService(cwd) - if err != nil { - return fmt.Errorf("failed to load scroll - %w", err) - } - - currentScroll := scrollService.GetCurrent() - - if 
len(currentScroll.Ports) == 0 { - return fmt.Errorf("no ports found in scroll") - } - - logger.Log().Info("Scroll loaded", zap.String("Name", currentScroll.Name), zap.Any("Version", currentScroll.Version), zap.String("AppVersion", currentScroll.AppVersion), zap.Any("Ports", currentScroll.Ports)) - - portService := services.NewPortServiceWithScrollFile(¤tScroll.File) - - coldStarter := services.NewColdStarter(portService, nil, scrollService.GetDir()) - - finish := coldStarter.Start(context.TODO()) - <-finish - logger.Log().Info("Coldstarter finished") - return nil - }, -} - -func init() { -} diff --git a/cmd/port_monitor.go b/cmd/port_monitor.go deleted file mode 100644 index aed6641e..00000000 --- a/cmd/port_monitor.go +++ /dev/null @@ -1,46 +0,0 @@ -package cmd - -import ( - "fmt" - "strconv" - "time" - - "github.com/highcard-dev/daemon/internal/core/services" - "github.com/spf13/cobra" -) - -var PortMonitorCmd = &cobra.Command{ - Use: "port", - Short: "Monitor ports", - Args: cobra.MinimumNArgs(1), - Long: "Utility to monitor ports and show their status and activity", - RunE: func(cmd *cobra.Command, args []string) error { - - ports := make([]int, len(args)) - - for idx, port := range args { - i, err := strconv.Atoi(port) - if err != nil { - return err - } - ports[idx] = i - } - - portMonitor := services.NewPortService(ports) - - go portMonitor.StartMonitoring(cmd.Context(), watchPortsInterfaces, 1) - - for { - ps := portMonitor.GetPorts() - for _, p := range ps { - fmt.Printf("Port %s: %d, last activity %v, open: %t \n", p.Port.Name, p.Port.Port, p.InactiveSince, p.Open) - } - time.Sleep(5 * time.Second) - } - - }, -} - -func init() { - PortMonitorCmd.Flags().StringArrayVarP(&watchPortsInterfaces, "watch-ports-interfaces", "", []string{"lo0"}, "Interfaces to watch for port activity") -} diff --git a/cmd/registry.go b/cmd/registry.go deleted file mode 100644 index 26fe0aa7..00000000 --- a/cmd/registry.go +++ /dev/null @@ -1,21 +0,0 @@ -package cmd - -import ( - 
"github.com/spf13/cobra" -) - -var RegistryCmd = &cobra.Command{ - Use: "registry", - Short: "Druid Scroll Registry Tool", - Long: `An application that enable managing scrolls versioning and packaging`, - Run: func(cmd *cobra.Command, args []string) { - cmd.Usage() - }, -} - -func init() { - - RegistryCmd.AddCommand(PushCommand) - RegistryCmd.AddCommand(PullCommand) - RegistryCmd.AddCommand(LoginCommand) -} diff --git a/cmd/registry_login.go b/cmd/registry_login.go deleted file mode 100644 index d73ebea5..00000000 --- a/cmd/registry_login.go +++ /dev/null @@ -1,70 +0,0 @@ -package cmd - -import ( - "fmt" - - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/core/services/registry" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var registryHost string -var registryUser string -var registryPassword string - -var LoginCommand = &cobra.Command{ - Use: "login", - Short: "Login to OCI registry", - Long: `Add or update registry credentials in the configuration. -Supports multiple registries with path-based credential matching. 
- -Examples: - druid registry login --host registry-1.docker.io -u user -p pass - druid registry login --host artifacts.druid.gg/project1 -u user1 -p pass1 - druid registry login --host artifacts.druid.gg/project2 -u user2 -p pass2`, - RunE: func(cmd *cobra.Command, args []string) error { - - if err := registry.ValidateCredentials(registryHost, registryUser, registryPassword); err != nil { - return fmt.Errorf("login failed: %w", err) - } - - cmd.Println("Login succeeded") - - var registries []domain.RegistryCredential - viper.UnmarshalKey("registries", ®istries) - - newCred := domain.RegistryCredential{ - Host: registryHost, - Username: registryUser, - Password: registryPassword, - } - - found := false - for i := range registries { - if registries[i].Host == registryHost { - registries[i] = newCred - found = true - break - } - } - - if !found { - registries = append(registries, newCred) - } - - viper.Set("registries", registries) - - return viper.WriteConfig() - }, -} - -func init() { - LoginCommand.Flags().StringVarP(®istryHost, "host", "", "", "OCI registry host (e.g., artifacts.druid.gg/project1)") - LoginCommand.Flags().StringVarP(®istryUser, "user", "u", "", "username") - LoginCommand.Flags().StringVarP(®istryPassword, "password", "p", "", "User password") - - LoginCommand.MarkFlagRequired("host") - LoginCommand.MarkFlagRequired("user") - LoginCommand.MarkFlagRequired("password") -} diff --git a/cmd/registry_pull.go b/cmd/registry_pull.go deleted file mode 100644 index e6491337..00000000 --- a/cmd/registry_pull.go +++ /dev/null @@ -1,34 +0,0 @@ -package cmd - -import ( - "github.com/highcard-dev/daemon/internal/core/services/registry" - "github.com/highcard-dev/daemon/internal/utils/logger" - "github.com/spf13/cobra" -) - -var noData bool - -var PullCommand = &cobra.Command{ - Use: "pull", - Short: "Pull a scroll from an OCI registry (tag or digest)", - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - - artifact := args[0] - 
- registryClient := registry.NewOciClient(LoadRegistryStore()) - - err := registryClient.PullSelective(cwd, artifact, !noData, nil) - if err != nil { - logger.Log().Error("Failed to pull from registry") - return err - } - - logger.Log().Info("Pulled from registry") - return nil - }, -} - -func init() { - PullCommand.Flags().BoolVarP(&noData, "no-data", "", false, "Download full scroll with data files") -} diff --git a/cmd/registry_push.go b/cmd/registry_push.go deleted file mode 100644 index 111abbcc..00000000 --- a/cmd/registry_push.go +++ /dev/null @@ -1,110 +0,0 @@ -package cmd - -import ( - "fmt" - "path" - "strings" - - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/core/services/registry" - "github.com/highcard-dev/daemon/internal/utils" - "github.com/highcard-dev/daemon/internal/utils/logger" - "github.com/spf13/cobra" - "go.uber.org/zap" -) - -var minRam string -var minCpu string -var minDisk string -var image string -var scrollPorts []string -var packMeta bool -var smart bool -var category string - -var PushCommand = &cobra.Command{ - Use: "push", - Short: "Generate OCI Artifacts and push to a remote registry", - Args: cobra.MaximumNArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - - credStore := LoadRegistryStore() - if !credStore.HasCredentials() { - return fmt.Errorf("no registry credentials configured. Please use `druid registry login` to set them") - } - - folder := "." 
- - fullPath := path.Join(cwd, folder) - - scroll, err := domain.NewScroll(fullPath) - - if err != nil { - return err - } - - repo := scroll.Name - tag := scroll.AppVersion - - if len(args) == 1 { - repo, tag = utils.SplitArtifact(args[0]) - } - - logger.Log().Info("Pushing "+repo+":"+tag+" to registry", zap.String("path", fullPath)) - - ociClient := registry.NewOciClient(credStore) - - overrides := map[string]string{} - if minRam != "" { - overrides["gg.druid.scroll.minRam"] = minRam - } - if minCpu != "" { - overrides["gg.druid.scroll.minCpu"] = minCpu - } - if minDisk != "" { - overrides["gg.druid.scroll.minDisk"] = minDisk - } - if image != "" { - overrides["gg.druid.scroll.image"] = image - } - if smart { - overrides["gg.druid.scroll.smart"] = "true" - } - if category != "" { - overrides["gg.druid.scroll.category"] = category - } - for _, p := range scrollPorts { - parts := strings.Split(p, "=") - name := parts[0] - port := "0" - if len(parts) == 2 { - port = parts[1] - } - overrides[fmt.Sprintf("gg.druid.scroll.port.%s", name)] = port - } - - _, err = ociClient.Push(fullPath, repo, tag, overrides, packMeta, &scroll.File) - if err != nil { - return err - } - - logger.Log().Info("Pushed "+scroll.Name+" to registry", zap.String("path", fullPath)) - return nil - }, -} - -func init() { - PushCommand.AddCommand(PushCategoryCommand) - - PushCommand.Flags().StringVarP(&minRam, "min-ram", "r", minRam, "Minimum RAM required to run the application. (Will be added as a manifest annotation gg.druid.scroll.minRam)") - PushCommand.Flags().StringVarP(&minCpu, "min-cpu", "c", minCpu, "Minimum CPU required to run the application. (Will be added as a manifest annotation gg.druid.scroll.minCpu)") - PushCommand.Flags().StringVarP(&minDisk, "min-disk", "d", minDisk, "Minimum Disk required to run the application. 
(Will be added as a manifest annotation gg.druid.scroll.minDisk)") - PushCommand.Flags().BoolVarP(&smart, "smart", "s", false, "Indicates, if the scroll is able to run as a smart deployment (Will be added as a manifest annotation gg.druid.scroll.smart)") - PushCommand.Flags().StringVarP(&category, "category", "", category, "Category of the scroll. (Will be added as a manifest annotation gg.druid.scroll.category)") - - PushCommand.Flags().StringVarP(&image, "image", "i", image, "Image to use for the scroll. (Will be added as a manifest annotation gg.druid.scroll.image)") - - PushCommand.Flags().StringSliceVarP(&scrollPorts, "port", "p", scrollPorts, "Ports to expose. Format webserver=80, dns=53/udp or just ftp (Will be added as a manifest annotation gg.druid.scroll.ports.)") - - PushCommand.Flags().BoolVarP(&packMeta, "pack-meta", "m", packMeta, "Pack the meta folder into the scroll.") -} diff --git a/cmd/registry_push_category.go b/cmd/registry_push_category.go deleted file mode 100644 index 99a3a4d6..00000000 --- a/cmd/registry_push_category.go +++ /dev/null @@ -1,53 +0,0 @@ -package cmd - -import ( - "fmt" - "path" - - "github.com/highcard-dev/daemon/internal/core/services/registry" - "github.com/highcard-dev/daemon/internal/utils/logger" - "github.com/spf13/cobra" - "go.uber.org/zap" -) - -var pushCategoryNamePattern string - -// druid push category -var PushCategoryCommand = &cobra.Command{ - Use: "category", - Short: "Push locale markdown files (e.g. de-DE.md) from a scroll directory as separate OCI layers.", - Args: cobra.RangeArgs(2, 3), - RunE: func(cmd *cobra.Command, args []string) error { - - credStore := LoadRegistryStore() - if !credStore.HasCredentials() { - return fmt.Errorf("no registry credentials configured. Please use `druid registry login` to set them") - } - - repo := args[0] - category := args[1] - scrollDir := "." 
- if len(args) == 3 { - scrollDir = args[2] - } - - fullPath := path.Join(cwd, scrollDir) - - logger.Log().Info("Pushing "+repo+" category to registry", zap.String("scrollDir", fullPath)) - - ociClient := registry.NewOciClient(credStore) - - _, err := ociClient.PushCategory(fullPath, repo, category) - - if err != nil { - return err - } - - logger.Log().Info("Pushed " + repo + " category to registry") - return nil - }, -} - -func init() { - PushCategoryCommand.Flags().StringVar(&pushCategoryNamePattern, "match", "", "Regexp matching file basenames to push (default: locale markdown like de-DE.md)") -} diff --git a/cmd/root.go b/cmd/root.go deleted file mode 100644 index ed2b491e..00000000 --- a/cmd/root.go +++ /dev/null @@ -1,94 +0,0 @@ -package cmd - -import ( - "os" - - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/core/services/registry" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var envPath string -var cwd string -var ignoreLockfileQueue bool -var configFile string - -var RootCmd = &cobra.Command{ - Use: "druid", - Short: "Druid Daemon that enable Scroll usage and communication", - Long: `A Scalable and Customizable daemon - to run any of your applications with the usage of Scrolls.`, - Run: func(cmd *cobra.Command, args []string) { - cmd.Usage() - }, -} - -func init() { - cobra.OnInitialize(initConfig) - - RootCmd.AddCommand(ServeCommand) - RootCmd.AddCommand(RunCmd) - RootCmd.AddCommand(RegistryCmd) - RootCmd.AddCommand(UpdateCommand) - RootCmd.AddCommand(SemverCmd) - RootCmd.AddCommand(VersionCmd) - RootCmd.AddCommand(ScrollCmd) - RootCmd.AddCommand(PortMonitorCmd) - RootCmd.AddCommand(ColdstarterCmd) - - c, _ := os.Getwd() - - RootCmd.PersistentFlags().StringVarP(&cwd, "cwd", "", c, "Path to environment file (.env)") - - RootCmd.PersistentFlags().StringVarP(&envPath, "env-file", "e", "./.env", "Path to environment file (.env)") - RootCmd.PersistentFlags().StringVar(&configFile, "config", "", 
"Path to config file (default: ~/.druid.yaml)") - RootCmd.PersistentFlags().BoolVar(&ignoreVersionCheck, "ignore-version-check", false, "Ignore version check between scroll.yaml and scroll-lock.json") - RootCmd.PersistentFlags().BoolVar(&ignoreLockfileQueue, "ignore-lockfile-queue", false, "Skip queuing the lock file") - -} - -func initConfig() { - viper.AutomaticEnv() - - if configFile != "" { - viper.SetConfigFile(configFile) - } else { - home, err := os.UserHomeDir() - cobra.CheckErr(err) - - viper.SetConfigType("yaml") - viper.SetConfigName(".druid") - viper.AddConfigPath(home) - } - - viper.SafeWriteConfig() - viper.ReadInConfig() -} - -func Execute() { - if err := RootCmd.Execute(); err != nil { - os.Exit(1) - } -} - -func LoadRegistryStore() *registry.CredentialStore { - var registries []domain.RegistryCredential - viper.UnmarshalKey("registries", ®istries) - - if len(registries) == 0 { - host := viper.GetString("registry.host") - user := viper.GetString("registry.user") - password := viper.GetString("registry.password") - - if host != "" { - registries = append(registries, domain.RegistryCredential{ - Host: host, - Username: user, - Password: password, - }) - } - } - - return registry.NewCredentialStore(registries) -} diff --git a/cmd/run.go b/cmd/run.go deleted file mode 100644 index 9ce0342e..00000000 --- a/cmd/run.go +++ /dev/null @@ -1,76 +0,0 @@ -package cmd - -import ( - "fmt" - - "github.com/highcard-dev/daemon/internal/core/services" - "github.com/highcard-dev/daemon/internal/core/services/registry" - "github.com/highcard-dev/daemon/internal/utils/logger" - "github.com/spf13/cobra" - "go.uber.org/zap" -) - -var RunCmd = &cobra.Command{ - Use: "run", - Short: "Run single command", - Args: cobra.ExactArgs(1), - Long: `This command runs a single command from the scroll file.`, - RunE: func(cmd *cobra.Command, args []string) error { - command := args[0] - - client := registry.NewOciClient(LoadRegistryStore()) - - ctx := cmd.Context() - - 
disablePrometheus, ok := ctx.Value("disablePrometheus").(bool) - - //only disable prometheus if context value is set and true - processMonitor := services.NewProcessMonitor(!ok || !disablePrometheus) - - logManager := services.NewLogManager() - consoleService := services.NewConsoleManager(logManager) - processManager := services.NewProcessManager(logManager, consoleService, processMonitor) - scrollService, err := services.NewScrollService(cwd) - if err != nil { - return fmt.Errorf("error creating scroll service: %w", err) - } - processLauncher, err := services.NewProcedureLauncher(client, processManager, services.NewPluginManager(), consoleService, logManager, scrollService, dependencyResolution) - if err != nil { - return err - } - - queueManager := services.NewQueueManager(scrollService, processLauncher) - go queueManager.Work() - _, err = initScroll(scrollService, processLauncher) - if err != nil { - return fmt.Errorf("error initializing scroll: %w", err) - } - - if !ignoreLockfileQueue { - logger.Log().Info("Queuing lock file") - - err = queueManager.QueueLockFile() - if err != nil { - return fmt.Errorf("error queuing lock file: %w", err) - } - } else { - logger.Log().Info("Skipping lock file queue (--ignore-lockfile-queue set)") - } - - logger.Log().Info("Adding command to queue", zap.String("command", command)) - err = queueManager.AddTempItem(command) - if err != nil { - return err - } - - queueManager.WaitUntilEmpty() - - return nil - }, -} - -func init() { - RunCmd.Flags().BoolVarP(&ignoreVersionCheck, "ignore-version-check", "", false, "Ignore version check") - RunCmd.Flags().StringVarP(&dependencyResolution, "dependency-resolution", "", "auto", "Dependency resolution strategy. 
Valid values: auto, nix, external") - RunCmd.Flags().BoolVarP(&allowPluginErrors, "allow-plugin-errors", "", false, "Ignore plugin errors on startup") -} diff --git a/cmd/scroll.go b/cmd/scroll.go deleted file mode 100644 index 6b7df2c3..00000000 --- a/cmd/scroll.go +++ /dev/null @@ -1,19 +0,0 @@ -package cmd - -import ( - "github.com/spf13/cobra" -) - -// scrollCmd represents the command for scrolling -var ScrollCmd = &cobra.Command{ - Use: "scroll", - Short: "Commands related to the scroll file", - Long: `Commands related to the scroll file`, - Run: func(cmd *cobra.Command, args []string) { - cmd.Usage() - }, -} - -func init() { - ScrollCmd.AddCommand(ScrollValidateCmd) -} diff --git a/cmd/serve.go b/cmd/serve.go deleted file mode 100644 index 9bbcd18a..00000000 --- a/cmd/serve.go +++ /dev/null @@ -1,471 +0,0 @@ -package cmd - -import ( - "errors" - "fmt" - "net/http" - "os" - "path/filepath" - _ "net/http/pprof" - "runtime" - "slices" - "sync" - "time" - - "github.com/highcard-dev/daemon/cmd/server/web" - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/core/services" - "github.com/highcard-dev/daemon/internal/core/services/registry" - "github.com/highcard-dev/daemon/internal/handler" - "github.com/highcard-dev/daemon/internal/signals" - "github.com/highcard-dev/daemon/internal/utils" - "github.com/highcard-dev/daemon/internal/utils/logger" - "github.com/spf13/cobra" - "go.uber.org/zap" -) - -var jwksUrl, userId string -var ignoreVersionCheck bool -var port int -var shutdownWait int -var additionalEndpoints []string -var idleScroll bool -var watchPorts bool -var watchPortsInterfaces []string -var portInactivity uint -var useColdstarter bool -var maxStartupHealthCheckTimeout uint -var skipArtifactDownload bool -var allowPluginErrors bool -var pprofBind string -var dependencyResolution string - -var ServeCommand = &cobra.Command{ - Use: "serve", - Short: "Initiate a Server and Communication with the Application", - 
Long: `This command locks the terminal by starting the Daemon, -which in turn compiles the scroll file, enable the API and Websocket -to interact and monitor the Scroll Application`, - Args: cobra.MaximumNArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - - var artifact string - if len(args) > 0 { - artifact = args[0] - } - - logger.Log().Info("Starting Scroll Daemon") - authorizer, err := services.NewAuthorizer(jwksUrl, userId) - if err != nil { - return err - } - - client := registry.NewOciClient(LoadRegistryStore()) - logManager := services.NewLogManager() - consoleService := services.NewConsoleManager(logManager) - - ctx := cmd.Context() - - disablePrometheus, ok := ctx.Value("disablePrometheus").(bool) - - //only disable prometheus if context value is set and true - processMonitor := services.NewProcessMonitor(!ok || !disablePrometheus) - - defer processMonitor.ShutdownPromMetrics() - - processManager := services.NewProcessManager(logManager, consoleService, processMonitor) - - pluginManager := services.NewPluginManager() - - logger.Log().Info("Starting Process Monitor") - go processMonitor.StartMonitoring() - - scrollService, err := services.NewScrollService(cwd) - if err != nil { - if errors.Is(err, domain.ErrScrollDoesNotExist) { - logger.Log().Warn("Scroll does not exist.") - - if artifact == "" { - return fmt.Errorf("no artifact provided") - } - - if skipArtifactDownload { - return fmt.Errorf("artifact download is disabled") - } - - logger.Log().Info("Downloading " + artifact + " into " + scrollService.GetDir()) - - err = client.PullSelective(scrollService.GetDir(), artifact, false, nil) - if err != nil { - return err - } - - _, err = scrollService.ReloadScroll() - if err != nil { - return err - } - - logger.Log().Info("Installed scroll " + artifact) - } else { - return fmt.Errorf("failed to load scroll - %w", err) - } - } - - currentScroll := scrollService.GetCurrent() - - logger.Log().Info("Scroll loaded", zap.String("Name", 
currentScroll.Name), zap.Any("Version", currentScroll.Version), zap.String("AppVersion", currentScroll.AppVersion), zap.Any("Ports", currentScroll.Ports)) - - processLauncher, err := services.NewProcedureLauncher(client, processManager, pluginManager, consoleService, logManager, scrollService, dependencyResolution) - if err != nil { - return err - } - - queueManager := services.NewQueueManager(scrollService, processLauncher) - - portService := services.NewPortServiceWithScrollFile(scrollService.GetFile()) - - coldStarter := services.NewColdStarter(portService, queueManager, scrollService.GetDir()) - - var dataPullErr error - var dataPullMu sync.Mutex - - uiDevService := services.NewUiDevService( - queueManager, scrollService, - ) - - scrollHandler := handler.NewScrollHandler(scrollService, pluginManager, processLauncher, queueManager, processManager) - processHandler := handler.NewProcessHandler(processManager) - scrollLogHandler := handler.NewScrollLogHandler(scrollService, logManager, processManager) - scrollMetricHandler := handler.NewScrollMetricHandler(scrollService, processMonitor) - queueHandler := handler.NewQueueHandler(queueManager) - portHandler := handler.NewPortHandler(portService) - healthHandler := handler.NewHealthHandler(portService, maxStartupHealthCheckTimeout, coldStarter.GetProgress()) - coldstarterHandler := handler.NewColdstarterHandler(coldStarter) - uiDevHandler := handler.NewWatchHandler(uiDevService, scrollService) - - var annotationHandler *handler.AnnotationHandler = nil - - if slices.Contains(additionalEndpoints, "annotations") { - annotationHandler = handler.NewAnnotationHandler(scrollService) - } - - websocketHandler := handler.NewWebsocketHandler(authorizer, scrollService, consoleService) - - signalHandler := signals.NewSignalHandler(ctx, queueManager, processManager, nil, shutdownWait) - daemonHander := handler.NewDaemonHandler(signalHandler) - - s := web.NewServer(jwksUrl, scrollHandler, scrollLogHandler, scrollMetricHandler, 
annotationHandler, processHandler, queueHandler, websocketHandler, portHandler, healthHandler, coldstarterHandler, daemonHander, authorizer, uiDevHandler, cwd, scrollService.GetDir()) - - a := s.Initialize() - - signalHandler.SetApp(a) - - // Data layers are pulled only from OnBeforeFinish (coldstarter Finish), not at bootstrap. - // If --coldstarter is false, Finish never runs and this hook does not execute. - if artifact != "" { - coldStarter.OnBeforeFinish = func(progress *domain.SnapshotProgress) { - markerPath := filepath.Join(scrollService.GetCwd(), domain.DataLoadedMarkerFile) - if markerExists, _ := utils.FileExists(markerPath); markerExists { - logger.Log().Info("Data already loaded (marker present), skipping data pull", zap.String("marker", markerPath)) - return - } - - logger.Log().Info("Pulling data from registry after coldstarter", zap.String("artifact", artifact)) - progress.Mode.Store("restore") - progress.Percentage.Store(0) - - if err := client.PullSelective(scrollService.GetDir(), artifact, true, progress); err != nil { - logger.Log().Error("Failed to pull data from registry", zap.Error(err)) - progress.Mode.Store("noop") - dataPullMu.Lock() - dataPullErr = fmt.Errorf("data pull failed: %w", err) - dataPullMu.Unlock() - signalHandler.Stop() - return - } - - if err := os.WriteFile(markerPath, nil, 0644); err != nil { - logger.Log().Error("Failed to write data-loaded marker", zap.String("path", markerPath), zap.Error(err)) - progress.Mode.Store("noop") - dataPullMu.Lock() - dataPullErr = fmt.Errorf("failed to write data-loaded marker: %w", err) - dataPullMu.Unlock() - signalHandler.Stop() - return - } - - logger.Log().Info("Data pull complete", zap.String("marker", markerPath)) - progress.Percentage.Store(100) - progress.Mode.Store("noop") - } - } - - if watchPorts { - logger.Log().Info("Starting port watcher", zap.Strings("interfaces", watchPortsInterfaces)) - go portService.StartMonitoring(ctx, watchPortsInterfaces, currentScroll.KeepAlivePPM) - } 
- - logger.Log().Info("Starting queue manager") - go queueManager.Work() - - if !idleScroll { - - doneChan := make(chan error, 1) - go func() { - for { - err := <-doneChan - if err != nil { - logger.Log().Error("Error in Daemon Startup", zap.Error(err)) - signalHandler.Stop() - } - logger.Log().Info("Daemon Startup Complete") - } - }() - - go func() { - if useColdstarter { - if currentScroll.CanColdStart() { - - for { - healthHandler.Started = nil - logger.Log().Info("Starting coldstarter") - finish := coldStarter.Start(ctx) - executedPort := <-finish - - if executedPort == nil { - logger.Log().Info("No port responsible for coldstarter finish, stopping coldstarter immediately") - coldStarter.Stop() - } else if executedPort.FinishAfterCommand == "" { - logger.Log().Info("No finish command set, stopping coldstarter ", zap.Uint("startDelay", executedPort.StartDelay), zap.String("port", executedPort.Name)) - coldStarter.StopWithDeplay(executedPort.StartDelay) - } - - logger.Log().Info("Coldstarter done, starting scroll") - - startup(scrollService, processLauncher, queueManager, portService, coldStarter, healthHandler, cwd, doneChan) - - portService.ResetOpenPorts() - - if !watchPorts { - logger.Log().Warn("watch-port is disabled, skipping inactivty watch") - return - } - - logger.Log().Info("Waiting for inactivity..") - - for { - - if ctx.Err() != nil { - logger.Log().Info("Context cancelled, stopping shutdown") - break - } - - ports := portService.GetPorts() - inactive := true - for _, port := range ports { - if port.InactiveSinceSec < portInactivity { - logger.Log().Info("Port still active", zap.Int("port", port.Port.Port), zap.Uint("InactiveSinceSec", port.InactiveSinceSec)) - inactive = false - break - } - } - if inactive { - logger.Log().Info("Ports inactive, starting shutdown") - break - } else { - logger.Log().Info("Ports still active, waiting..") - time.Sleep(5 * time.Second) - } - } - - signalHandler.ExtendedShutdownRoutine() - } - } else { - 
logger.Log().Warn("No ports to start, skipping coldstarter") - startup(scrollService, processLauncher, queueManager, portService, coldStarter, healthHandler, cwd, doneChan) - } - } else { - startup(scrollService, processLauncher, queueManager, portService, coldStarter, healthHandler, cwd, doneChan) - } - - }() - } else { - if useColdstarter { - go coldStarter.Start(ctx) - } - } - if pprofBind != "" { - go http.ListenAndServe(pprofBind, nil) - } - err = s.Serve(a, port) - - logger.Log().Info("Shutting down") - - dataPullMu.Lock() - dpErr := dataPullErr - dataPullMu.Unlock() - if dpErr != nil { - return dpErr - } - - return err - }, -} - -func init() { - ServeCommand.Flags().StringVarP(&pprofBind, "pprof", "", "", "Enable pprof on the given bind. This is useful for debugging purposes. E.g. --pprof=localhost:6060 or --pprof=:6060") - - ServeCommand.Flags().IntVarP(&port, "port", "p", 8081, "Port") - - ServeCommand.Flags().IntVarP(&shutdownWait, "shutdown-wait", "", 10, "Wait interval how long the process is allowed to shutdown. First normal shutdown, then forced shutdown") - - ServeCommand.Flags().StringVarP(&jwksUrl, "jwks-server", "", "", "JWKS Server to authenticate requests against") - - ServeCommand.Flags().StringVarP(&userId, "user-id", "u", "", "Allowed user ID, if JWKS is not set. 
It checks claims.sub of the JWT token") - - ServeCommand.Flags().BoolVarP(&idleScroll, "idle", "", false, "Don't start the queue manager, just use coldstarter") - - ServeCommand.Flags().BoolVarP(&watchPorts, "watch-ports", "", false, "Watch ports, even when coldstarter is not active") - - //macOS specific - if runtime.GOOS == "darwin" { - ServeCommand.Flags().StringArrayVarP(&watchPortsInterfaces, "watch-ports-interfaces", "", []string{"lo0", "en0"}, "Interfaces to watch for port activity") - } else { - ServeCommand.Flags().StringArrayVarP(&watchPortsInterfaces, "watch-ports-interfaces", "", []string{"lo"}, "Interfaces to watch for port activity") - } - - ServeCommand.Flags().BoolVarP(&useColdstarter, "coldstarter", "", false, "Use coldstarter to not start immediately") - - ServeCommand.Flags().BoolVarP(&ignoreVersionCheck, "ignore-version-check", "", false, "Ignore version check") - - ServeCommand.Flags().StringArrayVarP(&additionalEndpoints, "additional-endpoints", "", []string{}, "Additional endpoints to serve. Valid values: annotations") - - ServeCommand.Flags().UintVarP(&portInactivity, "port-inactivity", "", 120, "Port inactivity timeout") - - ServeCommand.Flags().UintVarP(&maxStartupHealthCheckTimeout, "max-health-check-startup-timeout", "", 60, "Sets the max amount of time the health check is allowed to take on startup. If the value is 0, there will be no timeout. This is useful to prevent the health check from blocking the startup of the daemon fully.") - - ServeCommand.Flags().BoolVarP(&skipArtifactDownload, "skip-artifact-download", "", false, "Skip downloading the artifact on startup") - - ServeCommand.Flags().BoolVarP(&allowPluginErrors, "allow-plugin-errors", "", false, "Ignore plugin errors on startup") - - ServeCommand.Flags().StringVarP(&dependencyResolution, "dependency-resolution", "", "auto", "Dependency resolution strategy. 
Valid values: auto, nix, external") - -} - -func startup(scrollService *services.ScrollService, processLauncher *services.ProcedureLauncher, queueManager *services.QueueManager, portSerivce *services.PortMonitor, coldStarter *services.ColdStarter, healthHandler *handler.HealthHandler, cwd string, doneChan chan error) { - now := time.Now() - healthHandler.Started = &now - - logger.Log().Info("Initializing scroll") - - newScroll, err := initScroll(scrollService, processLauncher) - - if err != nil { - doneChan <- err - return - } - logger.Log().Info("Initialized scroll done") - - currentScroll := scrollService.GetCurrent() - - if newScroll { - logger.Log().Info("Writing new scroll lock") - scrollService.WriteNewScrollLock() - logger.Log().Info("Bootstrapping done") - } - - callbacks := map[string]func(){} - - for _, port := range portSerivce.GetPorts() { - if port.FinishAfterCommand != "" { - callbacks[port.FinishAfterCommand] = func() { - coldStarter.StopWithDeplay(port.StartDelay) - } - } - } - - queueManager.RegisterCallbacks(callbacks) - - if !ignoreLockfileQueue { - logger.Log().Info("Queuing lock file") - - err = queueManager.QueueLockFile() - if err != nil { - doneChan <- err - return - } - } else { - logger.Log().Info("Skipping lock file queue (--ignore-lockfile-queue set)") - } - - // Must run after QueueLockFile so that dependency commands (e.g. install) - // are already populated in the queue. Otherwise RunQueue may re-add them - // as fresh items instead of recognising their "done" state from the lockfile. 
- logger.Log().Info("Ensuring scroll.serve process is queued") - err = queueManager.AddAndRememberItem(currentScroll.Serve) - if err != nil && !errors.Is(err, services.ErrAlreadyInQueue) && !errors.Is(err, services.ErrCommandDoneOnce) { - doneChan <- err - return - } - - //schedule crons - logger.Log().Info("Schedule crons") - - cronManager := services.NewCronManager(currentScroll.Cronjobs, queueManager) - err = cronManager.Init() - - if err != nil { - doneChan <- err - return - } - - var version string - - if currentScroll.Version != nil { - version = currentScroll.Version.String() - } else { - version = "N/A" - } - - logger.Log().Info("Active Scroll", - zap.String("Description", fmt.Sprintf("%s (%s)", currentScroll.Desc, currentScroll.Name)), - zap.String("Scroll Version", version), - zap.String("cwd", cwd)) - - doneChan <- nil - -} - -func initScroll(scrollService *services.ScrollService, processLauncher *services.ProcedureLauncher) (bool, error) { - - lock, err := scrollService.ReloadLock(ignoreVersionCheck) - if err != nil { - return false, err - } - - newScroll := len(lock.Statuses) == 0 - - if !newScroll { - logger.Log().Info("Found lock file, bootstrapping done") - } - - logger.Log().Info("Rendering cwd templates") - err = scrollService.RenderCwdTemplates() - if err != nil { - return newScroll, err - } - - logger.Log().Info("Launching plugins") - //important to launch plugins, after the templates are rendered, sothat templates can provide for plugins - err = processLauncher.LaunchPlugins() - - if err != nil { - if allowPluginErrors { - logger.Log().Warn("Error launching plugins", zap.Error(err)) - } else { - return newScroll, err - } - } - - return newScroll, nil -} diff --git a/cmd/server/web/middlewares/auth.go b/cmd/server/web/middlewares/auth.go deleted file mode 100644 index 2dede7f0..00000000 --- a/cmd/server/web/middlewares/auth.go +++ /dev/null @@ -1,59 +0,0 @@ -package middlewares - -import ( - "errors" - - "github.com/gofiber/fiber/v2" - 
"github.com/golang-jwt/jwt/v4" - "github.com/highcard-dev/daemon/internal/core/ports" - "github.com/highcard-dev/daemon/internal/utils/logger" - "go.uber.org/zap" -) - -type AuthenticationOptions struct { - ValidateQuery bool - FallbackHeaderValidation bool -} - -func TokenAuthentication(authorizerService ports.AuthorizerServiceInterface) fiber.Handler { - return func(ctx *fiber.Ctx) error { - token := ctx.Query("token") - if token != "" { - _, authQueryError := authorizerService.CheckQuery(token) - if authQueryError != nil { - logger.Log().Error("Token Authentication failed", - zap.String(logger.LogKeyContext, logger.LogContextHttp), - zap.String("type", "query"), - zap.Error(authQueryError), - ) - return errors.New("401 - Your spell has no permission to cast that magic!") - } else { - return ctx.Next() - } - } - // Get the Token Authentication credentials from header - if _, authHeaderError := authorizerService.CheckHeader(ctx); authHeaderError != nil { - logger.Log().Error("Token Authentication failed", - zap.String(logger.LogKeyContext, logger.LogContextHttp), - zap.String("type", "header"), - zap.Error(authHeaderError), - ) - return errors.New("401 - Your spell has no permission to cast that magic!") - } - return ctx.Next() - } -} - -func NewUserInjector() fiber.Handler { - return func(ctx *fiber.Ctx) error { - - user := ctx.Locals("user").(*jwt.Token) - - userId, ok := user.Claims.(jwt.MapClaims)["sub"] - if !ok { - return fiber.NewError(fiber.StatusBadRequest, "Invalid user id in jwt sub field") - } - ctx.Context().SetUserValue("userID", userId) - return ctx.Next() - } -} diff --git a/cmd/server/web/middlewares/header.go b/cmd/server/web/middlewares/header.go deleted file mode 100644 index b86f41cc..00000000 --- a/cmd/server/web/middlewares/header.go +++ /dev/null @@ -1,13 +0,0 @@ -package middlewares - -import ( - "github.com/gofiber/fiber/v2" - constants "github.com/highcard-dev/daemon/internal" -) - -func NewHeaderMiddleware() fiber.Handler { - return 
func(ctx *fiber.Ctx) error { - ctx.Response().Header.Set("Druid-Version", constants.Version) - return ctx.Next() - } -} diff --git a/cmd/server/web/middlewares/validation.go b/cmd/server/web/middlewares/validation.go deleted file mode 100644 index 258cc671..00000000 --- a/cmd/server/web/middlewares/validation.go +++ /dev/null @@ -1,125 +0,0 @@ -package middlewares - -import ( - "bytes" - "context" - "net/http" - - "github.com/getkin/kin-openapi/openapi3" - "github.com/getkin/kin-openapi/openapi3filter" - "github.com/getkin/kin-openapi/routers" - "github.com/getkin/kin-openapi/routers/gorillamux" - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/api" -) - -// OpenAPIValidator middleware validates incoming requests against the OpenAPI specification -type OpenAPIValidator struct { - router routers.Router - spec *openapi3.T -} - -// NewOpenAPIValidator creates a new OpenAPI validation middleware -func NewOpenAPIValidator() (*OpenAPIValidator, error) { - swagger, err := api.GetSwagger() - if err != nil { - return nil, err - } - - // Create router for finding routes - router, err := gorillamux.NewRouter(swagger) - if err != nil { - return nil, err - } - - return &OpenAPIValidator{ - router: router, - spec: swagger, - }, nil -} - -// Middleware returns a Fiber middleware handler that validates requests -func (v *OpenAPIValidator) Middleware() fiber.Handler { - return func(c *fiber.Ctx) error { - // Convert Fiber context to http.Request - req, err := fiberToHTTPRequest(c) - if err != nil { - return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{ - "status": "error", - "error": "Failed to process request", - }) - } - - // Find the route in OpenAPI spec - route, pathParams, err := v.router.FindRoute(req) - if err != nil { - // Route not found in OpenAPI spec, skip validation - return c.Next() - } - - // Validate request - requestValidationInput := &openapi3filter.RequestValidationInput{ - Request: req, - PathParams: pathParams, - Route: 
route, - Options: &openapi3filter.Options{ - AuthenticationFunc: openapi3filter.NoopAuthenticationFunc, - }, - } - - ctx := context.Background() - if err := openapi3filter.ValidateRequest(ctx, requestValidationInput); err != nil { - return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{ - "status": "error", - "error": err.Error(), - }) - } - - return c.Next() - } -} - -// fiberToHTTPRequest converts a Fiber context to a standard http.Request -func fiberToHTTPRequest(c *fiber.Ctx) (*http.Request, error) { - // Get the request body - body := c.Body() - bodyReader := bytes.NewReader(body) - - // Create the HTTP request - method := c.Method() - url := c.OriginalURL() - - // Build full URL with scheme and host - scheme := "http" - if c.Protocol() == "https" { - scheme = "https" - } - fullURL := scheme + "://" + c.Hostname() + url - - req, err := http.NewRequest(method, fullURL, bodyReader) - if err != nil { - return nil, err - } - - // Copy headers - c.Request().Header.VisitAll(func(key, value []byte) { - req.Header.Add(string(key), string(value)) - }) - - // Set Content-Type if present - contentType := c.Get("Content-Type") - if contentType != "" { - req.Header.Set("Content-Type", contentType) - } - - return req, nil -} - -// MustNewOpenAPIValidator creates a new validator or panics on error -func MustNewOpenAPIValidator() *OpenAPIValidator { - validator, err := NewOpenAPIValidator() - if err != nil { - panic(err) - } - return validator -} diff --git a/cmd/server/web/server.go b/cmd/server/web/server.go deleted file mode 100644 index 57fa5b92..00000000 --- a/cmd/server/web/server.go +++ /dev/null @@ -1,244 +0,0 @@ -package web - -import ( - "errors" - "fmt" - "net/http" - - "github.com/gofiber/contrib/websocket" - "github.com/gofiber/fiber/v2" - "github.com/gofiber/fiber/v2/middleware/adaptor" - "github.com/gofiber/fiber/v2/middleware/cors" - "github.com/gofiber/fiber/v2/middleware/filesystem" - jwtware "github.com/gofiber/jwt/v3" - 
"github.com/highcard-dev/daemon/cmd/server/web/middlewares" - - constants "github.com/highcard-dev/daemon/internal" - "github.com/highcard-dev/daemon/internal/api" - "github.com/highcard-dev/daemon/internal/core/ports" - "github.com/highcard-dev/daemon/internal/utils/logger" - "github.com/prometheus/client_golang/prometheus/promhttp" - "go.uber.org/zap" - "golang.org/x/net/webdav" -) - -type Server struct { - corsMiddleware fiber.Handler - injectUserMiddleware fiber.Handler - headerMiddleware fiber.Handler - tokenAuthenticationMiddleware fiber.Handler - jwtMiddleware fiber.Handler - scrollHandler ports.ScrollHandlerInterface - scrollLogHandler ports.ScrollLogHandlerInterface - scrollMetricHandler ports.ScrollMetricHandlerInterface - annotationHandler ports.AnnotationHandlerInterface - processHandler ports.ProcessHandlerInterface - queueHandler ports.QueueHandlerInterface - websocketHandler ports.WebsocketHandlerInterface - portHandler ports.PortHandlerInterface - healthHandler ports.HealthHandlerInterface - coldstarterHandler ports.ColdstarterHandlerInterface - daemonHandler ports.SignalHandlerInterface - watchHandler ports.WatchHandlerInterface - webdavPath string - scrollPath string -} - -func NewServer( - jwlsUrl string, - scrollHandler ports.ScrollHandlerInterface, - scrollLogHandler ports.ScrollLogHandlerInterface, - scrollMetricHandler ports.ScrollMetricHandlerInterface, - annotationHandler ports.AnnotationHandlerInterface, - processHandler ports.ProcessHandlerInterface, - queueHandler ports.QueueHandlerInterface, - websocketHandler ports.WebsocketHandlerInterface, - portHandler ports.PortHandlerInterface, - healthHandler ports.HealthHandlerInterface, - coldstarterHandler ports.ColdstarterHandlerInterface, - daemonHandler ports.SignalHandlerInterface, - authorizerService ports.AuthorizerServiceInterface, - watchHandler ports.WatchHandlerInterface, - webdavPath string, - scrollPath string, -) *Server { - server := &Server{ - corsMiddleware: 
cors.New(cors.Config{ - AllowOrigins: "*", - AllowHeaders: "Origin, Content-Type, Accept, Authorization, X-DRUID-USER, Depth, Overwrite, Destination, If, Lock-Token, Timeout, DAV", - AllowMethods: "GET,POST,PUT,DELETE,OPTIONS,PATCH,PROPFIND,MKCOL,COPY,MOVE", - ExposeHeaders: "Druid-Version", - }), - injectUserMiddleware: middlewares.NewUserInjector(), - headerMiddleware: middlewares.NewHeaderMiddleware(), - scrollHandler: scrollHandler, - scrollLogHandler: scrollLogHandler, - scrollMetricHandler: scrollMetricHandler, - annotationHandler: annotationHandler, - processHandler: processHandler, - queueHandler: queueHandler, - websocketHandler: websocketHandler, - portHandler: portHandler, - tokenAuthenticationMiddleware: middlewares.TokenAuthentication(authorizerService), - healthHandler: healthHandler, - coldstarterHandler: coldstarterHandler, - webdavPath: webdavPath, - scrollPath: scrollPath, - daemonHandler: daemonHandler, - watchHandler: watchHandler, - } - - if jwlsUrl != "" { - server.jwtMiddleware = jwtware.New(jwtware.Config{ - KeySetURLs: []string{jwlsUrl}, - }) - } - - return server -} - -func (s *Server) Initialize() *fiber.App { - webdavRequestMethods := []string{"PROPFIND", "MKCOL", "COPY", "MOVE"} - - app := fiber.New(fiber.Config{ - // Immutable ensures that all values returned from context methods are immutable - // and safe to store beyond the request lifetime. Without this, Fiber reuses buffers - // which causes data corruption when storing URL parameters as map keys. 
- Immutable: true, - ErrorHandler: func(ctx *fiber.Ctx, err error) error { - code := fiber.StatusInternalServerError - var e *fiber.Error - if errors.As(err, &e) { - code = e.Code - return ctx.Status(code).JSON(e) - } else { - var e fiber.Error - e.Code = 500 - e.Message = err.Error() - return ctx.Status(code).JSON(e) - } - }, - RequestMethods: append(fiber.DefaultMethods[:], webdavRequestMethods...), - DisableStartupMessage: true, - }) - - s.SetAPI(app) - - return app -} - -func (s *Server) SetAPI(app *fiber.App) *fiber.App { - // Apply global middleware - app.Use(s.headerMiddleware) - app.Use(s.corsMiddleware) - - // Create completely isolated websocket routes FIRST to avoid any middleware pollution - wsRoutes := app.Group("/ws/v1") - wsRoutes.Use(s.tokenAuthenticationMiddleware) - - // Define websocket routes immediately after creating the group - wsRoutes.Get("/serve/:console", websocket.New(s.websocketHandler.HandleProcess)).Name("ws.serve") - wsRoutes.Get("/watch/notify", websocket.New(s.watchHandler.NotifyChange)).Name("ws.watch.notify") - - // Now create other route groups - apiRoutes := app.Group("/") - webdavRoutes := app.Group("/webdav") - - apiRoutes.Use(middlewares.MustNewOpenAPIValidator().Middleware()) - - // Create properly isolated UI route groups - privateUiRoutes := app.Group("") - publicUiRoutes := app.Group("") - - if s.jwtMiddleware != nil { - apiRoutes.Use(s.jwtMiddleware, s.injectUserMiddleware) - webdavRoutes.Use(s.jwtMiddleware, s.injectUserMiddleware) - privateUiRoutes.Use(s.jwtMiddleware, s.injectUserMiddleware) - } - - // Use the generated RegisterHandlersWithOptions to set up all API routes - api.RegisterHandlersWithOptions(apiRoutes, &apiServer{ - ScrollHandlerInterface: s.scrollHandler, - ScrollLogHandlerInterface: s.scrollLogHandler, - ScrollMetricHandlerInterface: s.scrollMetricHandler, - ProcessHandlerInterface: s.processHandler, - QueueHandlerInterface: s.queueHandler, - WebsocketHandlerInterface: s.websocketHandler, - 
PortHandlerInterface: s.portHandler, - HealthHandlerInterface: s.healthHandler, - ColdstarterHandlerInterface: s.coldstarterHandler, - SignalHandlerInterface: s.daemonHandler, - WatchHandlerInterface: s.watchHandler, - }, api.FiberServerOptions{}) - - // Create the WebDAV handler - webdavHandler := &webdav.Handler{ - Prefix: "/webdav", - FileSystem: webdav.Dir(s.webdavPath), - LockSystem: webdav.NewMemLS(), - } - - webdavRoutes.Use("*", adaptor.HTTPHandler(webdavHandler)) - - apiRoutes.Get("/ports", s.portHandler.GetPorts).Name("ports.list") - - publicUiRoutes.Use("/public", filesystem.New(filesystem.Config{ - Root: http.Dir(s.scrollPath + "/public"), - Browse: false, - })) - - privateUiRoutes.Use("/private", filesystem.New(filesystem.Config{ - Root: http.Dir(s.scrollPath + "/private"), - Browse: false, - })) - - if s.annotationHandler != nil { - app.Get("/annotations", s.annotationHandler.Annotations).Name("annotations.list") - } - app.Get("/metrics", adaptor.HTTPHandler(promhttp.Handler())).Name("metrics") - - app.Get("/health", s.healthHandler.GetHealthAuth).Name("health") - - app.Get("/info", func(ctx *fiber.Ctx) error { - return ctx.JSON(fiber.Map{ - "version": constants.Version, - }) - }) - - //app.Get("/swagger/*", swagger.HandlerDefault) // default - - //Catch-all 404 page - app.Use(func(ctx *fiber.Ctx) error { - return ctx.SendStatus(404) - }) - - return app -} - -func (s *Server) SetDaemonRoute(app *fiber.App, signalHandler ports.SignalHandlerInterface) { - app.Post("/stop", signalHandler.StopDaemon).Name("daemon.stop") -} - -func (s *Server) Serve(app *fiber.App, port int) error { - addr := fmt.Sprintf(":%d", port) - if err := app.Listen(addr); err != nil { - logger.Log().Error("web server error", zap.Error(err)) - return err - } - return nil -} - -// apiServer embeds all handler interfaces to implement api.ServerInterface directly -type apiServer struct { - ports.ScrollHandlerInterface - ports.ScrollLogHandlerInterface - 
ports.ScrollMetricHandlerInterface - ports.ProcessHandlerInterface - ports.QueueHandlerInterface - ports.WebsocketHandlerInterface - ports.PortHandlerInterface - ports.HealthHandlerInterface - ports.ColdstarterHandlerInterface - ports.SignalHandlerInterface - ports.WatchHandlerInterface -} diff --git a/docs_md/main.go b/docs_md/main.go index eb840169..bc2b0fec 100644 --- a/docs_md/main.go +++ b/docs_md/main.go @@ -9,7 +9,10 @@ import ( "regexp" "strings" - "github.com/highcard-dev/daemon/cmd" + clientcli "github.com/highcard-dev/daemon/apps/druid-client/adapters/cli" + coldstartercli "github.com/highcard-dev/daemon/apps/druid-coldstarter/adapters/cli" + druidcli "github.com/highcard-dev/daemon/apps/druid/adapters/cli" + "github.com/spf13/cobra" "github.com/spf13/cobra/doc" ) @@ -21,9 +24,13 @@ sidebar_label: %s ` -var fixSynopsisRegexp = regexp.MustCompile("(?si)(## druid.*?\n)(.*?)#(## Synopsis\n*\\s*)(.*?)(\\s*\n\n\\s*)((```)(.*?))?#(## Options)(.*?)((### Options inherited from parent commands)(.*?)#(## See Also)(\\s*\\* \\[devspace\\][^\n]*)?(.*))|(#(## See Also)(\\s*\\* \\[devspace\\][^\n]*)?(.*))\n###### Auto generated by spf13/cobra on .*$") +var autoGeneratedFooterRegexp = regexp.MustCompile(`(?m)^###### Auto generated by spf13/cobra on .*$\n?`) +var runtimeSocketDefaultRegexp = regexp.MustCompile(`\(default "[^"]*druid-[^"]*runtime\.sock"\)`) func main() { + if err := clearGeneratedDocs(); err != nil { + log.Fatal(err) + } linkhandler := func(s string) string { return strings.TrimSuffix(s, ".md") @@ -51,12 +58,19 @@ func main() { return fmt.Sprintf(headerTemplate, title, sidebarLabel) } - err := doc.GenMarkdownTreeCustom(cmd.RootCmd, cliDocsDir, filePrepender, linkhandler) - if err != nil { - log.Fatal(err) + roots := []*cobra.Command{ + druidcli.RootCmd, + clientcli.NewRootCommand(), + coldstartercli.NewRootCommand(), + } + for _, root := range roots { + err := doc.GenMarkdownTreeCustom(root, cliDocsDir, filePrepender, linkhandler) + if err != nil { + 
log.Fatal(err) + } } - err = filepath.Walk(cliDocsDir, func(path string, info os.FileInfo, err error) error { + err := filepath.Walk(cliDocsDir, func(path string, info os.FileInfo, err error) error { if err != nil { return err } @@ -74,7 +88,8 @@ func main() { return err } - newContents := fixSynopsisRegexp.ReplaceAllString(string(content), "$2$3$7$8") + normalized := normalizeGeneratedDefaults(string(content)) + newContents := autoGeneratedFooterRegexp.ReplaceAllString(normalized, "") err = os.WriteFile(path, []byte(newContents), 0) if err != nil { @@ -87,3 +102,26 @@ func main() { log.Fatal(err) } } + +func normalizeGeneratedDefaults(content string) string { + if cwd, err := os.Getwd(); err == nil { + content = strings.ReplaceAll(content, fmt.Sprintf(`(default "%s")`, cwd), `(default ".")`) + } + return runtimeSocketDefaultRegexp.ReplaceAllString(content, `(default "")`) +} + +func clearGeneratedDocs() error { + entries, err := os.ReadDir(cliDocsDir) + if err != nil { + return err + } + for _, entry := range entries { + if entry.IsDir() || filepath.Ext(entry.Name()) != ".md" { + continue + } + if err := os.Remove(filepath.Join(cliDocsDir, entry.Name())); err != nil { + return err + } + } + return nil +} diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 00000000..99c8d651 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,32 @@ +# Runtime Examples + +These examples illustrate the container-first, runtime-backend scroll model. + +They intentionally keep commands as orchestration groups and put executable runtime fields on `procedures`. + +Each example declares the container paths it needs with `mounts`. Mounts are sourced from the runtime `data/` directory only. If `sub_path` is omitted, the whole `data/` directory is mounted; otherwise `sub_path` is relative to `data/`. + +## Examples + +- `minecraft`: finite install and coldstart procedures plus a restarting game server procedure. 
+- `mysql`: restarting database procedure with a persistent data subpath plus a finite backup procedure. +- `static-web`: build-once procedure served by a restarting web procedure. +- `jobs`: finite job-only pipeline that prepares data, transforms it, reports output, and exits. +- `container-lab`: container-only integration example with setup jobs, persistent web/cache services, ports, mounts, env, smoke checks, reports, and signal cleanup. + +Use `druid serve --runtime docker` for container execution. The daemon listens on a Unix socket, and `druid-client` connects to that socket with `--daemon-socket`. The client owns OCI work: `druid-client pull` downloads artifacts, while `druid-client create [name]` materializes a scroll and registers it with the daemon. For already checked-out examples, use `druid-client register [dir]` and omit `[name]` so ids are derived from each example's `scroll.yaml`. Run commands with `druid-client run ` and inspect state with `druid-client describe `. + +Runtime procedures use `image`, `command`, `working_dir`, `env`, `ports`, `mounts`, `signal`, and `tty` directly on each procedure. + +The coldstart gate is a normal command that runs the standalone `druid-coldstarter` binary/image. Build the local image with `make build-coldstarter-image` before running the Minecraft example. Custom coldstart handlers belong under `data/coldstart/` inside the canonical scroll volume. 
+ +The `container-lab` example intentionally avoids coldstarter so it can be used as a broad runtime smoke test for Docker and Kubernetes: + +```bash +druid-client register examples/container-lab +druid-client describe container-lab +druid-client ports container-lab +druid-client run container-lab verify +druid-client run container-lab report +druid-client run container-lab stop +``` diff --git a/examples/container-lab/scroll.yaml b/examples/container-lab/scroll.yaml new file mode 100644 index 00000000..7f94bc91 --- /dev/null +++ b/examples/container-lab/scroll.yaml @@ -0,0 +1,143 @@ +name: ghcr.io/druid-examples/container-lab +desc: Multi-service container-only scroll for runtime backend testing +version: 0.1.0 +app_version: "1.0" + +ports: + - name: http + protocol: http + port: 8080 + mandatory: true + - name: redis + protocol: tcp + port: 6379 + mandatory: true + +serve: "start" +commands: + prepare: + run: once + procedures: + - id: prepare-content + image: alpine:3.20 + env: + LAB_TITLE: Druid container lab + LAB_MESSAGE: persistent services without coldstarter + mounts: + - path: /work + command: + - sh + - -c + - >- + set -eu; + mkdir -p /work/site /work/redis /work/reports; + printf 'Druid container lab

Druid container lab

persistent services without coldstarter

\n' + > /work/site/index.html; + env | grep '^LAB_' | sort > /work/reports/env.txt; + printf 'prepared\n' > /work/reports/prepare.txt + + seed-cache: + needs: [prepare] + run: always + procedures: + - id: seed-cache-files + image: alpine:3.20 + mounts: + - path: /work + command: + - sh + - -c + - >- + set -eu; + date -u '+seeded at %Y-%m-%dT%H:%M:%SZ' > /work/redis/seed.txt; + printf 'cache seed refreshed\n' > /work/reports/seed-cache.txt + + start: + needs: [prepare, seed-cache] + run: persistent + procedures: + - id: web + image: python:3.12-alpine + expectedPorts: + - name: http + keepAliveTraffic: 1b/5m + mounts: + - path: /site + sub_path: site + read_only: true + command: + - python + - -m + - http.server + - "8080" + - --directory + - /site + + - id: cache + image: redis:7.4-alpine + expectedPorts: + - name: redis + keepAliveTraffic: 1b/5m + mounts: + - path: /data + sub_path: redis + command: + - redis-server + - --appendonly + - "yes" + - --dir + - /data + + verify: + needs: [start] + run: always + procedures: + - id: verify-http + image: alpine:3.20 + mounts: + - path: /work + command: + - sh + - -c + - >- + set -eu; + grep -q 'Druid container lab' /work/site/index.html; + printf 'site content verified\n' > /work/reports/http-verify.txt + + - id: verify-cache + image: alpine:3.20 + mounts: + - path: /work + command: + - sh + - -c + - >- + set -eu; + test -s /work/redis/seed.txt; + printf 'cache data directory verified\n' > /work/reports/cache-verify.txt + + report: + needs: [verify] + run: always + procedures: + - id: report + image: alpine:3.20 + mounts: + - path: /work + command: + - sh + - -c + - >- + set -eu; + printf 'container lab reports:\n'; + find /work/reports -maxdepth 1 -type f -print -exec sh -c 'echo "--- $1"; cat "$1"' _ {} \; + + stop: + run: always + procedures: + - type: signal + target: web + signal: SIGTERM + - type: signal + target: cache + signal: SIGTERM diff --git a/examples/jobs/scroll.yaml b/examples/jobs/scroll.yaml new file 
mode 100644 index 00000000..30e1386b --- /dev/null +++ b/examples/jobs/scroll.yaml @@ -0,0 +1,45 @@ +name: ghcr.io/druid-examples/jobs +desc: Finite job pipeline using commands as runtime units +version: 0.1.0 +app_version: "1.0" + +serve: report +commands: + prepare: + run: once + procedures: + - image: alpine:3.20 + mounts: + - path: /work + command: + - sh + - -c + - >- + mkdir -p /work/jobs + && printf 'hello from druid jobs\n' > /work/jobs/input.txt + + transform: + needs: [prepare] + run: always + procedures: + - image: alpine:3.20 + mounts: + - path: /work + command: + - sh + - -c + - >- + tr '[:lower:]' '[:upper:]' < /work/jobs/input.txt + > /work/jobs/output.txt + + report: + needs: [transform] + run: always + procedures: + - image: alpine:3.20 + mounts: + - path: /work + command: + - sh + - -c + - cat /work/jobs/output.txt diff --git a/examples/minecraft/json.lua b/examples/minecraft/json.lua deleted file mode 100644 index 54d44484..00000000 --- a/examples/minecraft/json.lua +++ /dev/null @@ -1,388 +0,0 @@ --- --- json.lua --- --- Copyright (c) 2020 rxi --- --- Permission is hereby granted, free of charge, to any person obtaining a copy of --- this software and associated documentation files (the "Software"), to deal in --- the Software without restriction, including without limitation the rights to --- use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies --- of the Software, and to permit persons to whom the Software is furnished to do --- so, subject to the following conditions: --- --- The above copyright notice and this permission notice shall be included in all --- copies or substantial portions of the Software. --- --- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR --- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, --- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE --- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER --- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, --- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE --- SOFTWARE. --- - -local json = { _version = "0.1.2" } - -------------------------------------------------------------------------------- --- Encode -------------------------------------------------------------------------------- - -local encode - -local escape_char_map = { - [ "\\" ] = "\\", - [ "\"" ] = "\"", - [ "\b" ] = "b", - [ "\f" ] = "f", - [ "\n" ] = "n", - [ "\r" ] = "r", - [ "\t" ] = "t", -} - -local escape_char_map_inv = { [ "/" ] = "/" } -for k, v in pairs(escape_char_map) do - escape_char_map_inv[v] = k -end - - -local function escape_char(c) - return "\\" .. (escape_char_map[c] or string.format("u%04x", c:byte())) -end - - -local function encode_nil(val) - return "null" -end - - -local function encode_table(val, stack) - local res = {} - stack = stack or {} - - -- Circular reference? - if stack[val] then error("circular reference") end - - stack[val] = true - - if rawget(val, 1) ~= nil or next(val) == nil then - -- Treat as array -- check keys are valid and it is not sparse - local n = 0 - for k in pairs(val) do - if type(k) ~= "number" then - error("invalid table: mixed or invalid key types") - end - n = n + 1 - end - if n ~= #val then - error("invalid table: sparse array") - end - -- Encode - for i, v in ipairs(val) do - table.insert(res, encode(v, stack)) - end - stack[val] = nil - return "[" .. table.concat(res, ",") .. "]" - - else - -- Treat as an object - for k, v in pairs(val) do - if type(k) ~= "string" then - error("invalid table: mixed or invalid key types") - end - table.insert(res, encode(k, stack) .. ":" .. encode(v, stack)) - end - stack[val] = nil - return "{" .. table.concat(res, ",") .. "}" - end -end - - -local function encode_string(val) - return '"' .. 
val:gsub('[%z\1-\31\\"]', escape_char) .. '"' -end - - -local function encode_number(val) - -- Check for NaN, -inf and inf - if val ~= val or val <= -math.huge or val >= math.huge then - error("unexpected number value '" .. tostring(val) .. "'") - end - return string.format("%.14g", val) -end - - -local type_func_map = { - [ "nil" ] = encode_nil, - [ "table" ] = encode_table, - [ "string" ] = encode_string, - [ "number" ] = encode_number, - [ "boolean" ] = tostring, -} - - -encode = function(val, stack) - local t = type(val) - local f = type_func_map[t] - if f then - return f(val, stack) - end - error("unexpected type '" .. t .. "'") -end - - -function json.encode(val) - return ( encode(val) ) -end - - -------------------------------------------------------------------------------- --- Decode -------------------------------------------------------------------------------- - -local parse - -local function create_set(...) - local res = {} - for i = 1, select("#", ...) do - res[ select(i, ...) 
] = true - end - return res -end - -local space_chars = create_set(" ", "\t", "\r", "\n") -local delim_chars = create_set(" ", "\t", "\r", "\n", "]", "}", ",") -local escape_chars = create_set("\\", "/", '"', "b", "f", "n", "r", "t", "u") -local literals = create_set("true", "false", "null") - -local literal_map = { - [ "true" ] = true, - [ "false" ] = false, - [ "null" ] = nil, -} - - -local function next_char(str, idx, set, negate) - for i = idx, #str do - if set[str:sub(i, i)] ~= negate then - return i - end - end - return #str + 1 -end - - -local function decode_error(str, idx, msg) - local line_count = 1 - local col_count = 1 - for i = 1, idx - 1 do - col_count = col_count + 1 - if str:sub(i, i) == "\n" then - line_count = line_count + 1 - col_count = 1 - end - end - error( string.format("%s at line %d col %d", msg, line_count, col_count) ) -end - - -local function codepoint_to_utf8(n) - -- http://scripts.sil.org/cms/scripts/page.php?site_id=nrsi&id=iws-appendixa - local f = math.floor - if n <= 0x7f then - return string.char(n) - elseif n <= 0x7ff then - return string.char(f(n / 64) + 192, n % 64 + 128) - elseif n <= 0xffff then - return string.char(f(n / 4096) + 224, f(n % 4096 / 64) + 128, n % 64 + 128) - elseif n <= 0x10ffff then - return string.char(f(n / 262144) + 240, f(n % 262144 / 4096) + 128, - f(n % 4096 / 64) + 128, n % 64 + 128) - end - error( string.format("invalid unicode codepoint '%x'", n) ) -end - - -local function parse_unicode_escape(s) - local n1 = tonumber( s:sub(1, 4), 16 ) - local n2 = tonumber( s:sub(7, 10), 16 ) - -- Surrogate pair? - if n2 then - return codepoint_to_utf8((n1 - 0xd800) * 0x400 + (n2 - 0xdc00) + 0x10000) - else - return codepoint_to_utf8(n1) - end -end - - -local function parse_string(str, i) - local res = "" - local j = i + 1 - local k = j - - while j <= #str do - local x = str:byte(j) - - if x < 32 then - decode_error(str, j, "control character in string") - - elseif x == 92 then -- `\`: Escape - res = res .. 
str:sub(k, j - 1) - j = j + 1 - local c = str:sub(j, j) - if c == "u" then - local hex = str:match("^[dD][89aAbB]%x%x\\u%x%x%x%x", j + 1) - or str:match("^%x%x%x%x", j + 1) - or decode_error(str, j - 1, "invalid unicode escape in string") - res = res .. parse_unicode_escape(hex) - j = j + #hex - else - if not escape_chars[c] then - decode_error(str, j - 1, "invalid escape char '" .. c .. "' in string") - end - res = res .. escape_char_map_inv[c] - end - k = j + 1 - - elseif x == 34 then -- `"`: End of string - res = res .. str:sub(k, j - 1) - return res, j + 1 - end - - j = j + 1 - end - - decode_error(str, i, "expected closing quote for string") -end - - -local function parse_number(str, i) - local x = next_char(str, i, delim_chars) - local s = str:sub(i, x - 1) - local n = tonumber(s) - if not n then - decode_error(str, i, "invalid number '" .. s .. "'") - end - return n, x -end - - -local function parse_literal(str, i) - local x = next_char(str, i, delim_chars) - local word = str:sub(i, x - 1) - if not literals[word] then - decode_error(str, i, "invalid literal '" .. word .. "'") - end - return literal_map[word], x -end - - -local function parse_array(str, i) - local res = {} - local n = 1 - i = i + 1 - while 1 do - local x - i = next_char(str, i, space_chars, true) - -- Empty / end of array? - if str:sub(i, i) == "]" then - i = i + 1 - break - end - -- Read token - x, i = parse(str, i) - res[n] = x - n = n + 1 - -- Next token - i = next_char(str, i, space_chars, true) - local chr = str:sub(i, i) - i = i + 1 - if chr == "]" then break end - if chr ~= "," then decode_error(str, i, "expected ']' or ','") end - end - return res, i -end - - -local function parse_object(str, i) - local res = {} - i = i + 1 - while 1 do - local key, val - i = next_char(str, i, space_chars, true) - -- Empty / end of object? 
- if str:sub(i, i) == "}" then - i = i + 1 - break - end - -- Read key - if str:sub(i, i) ~= '"' then - decode_error(str, i, "expected string for key") - end - key, i = parse(str, i) - -- Read ':' delimiter - i = next_char(str, i, space_chars, true) - if str:sub(i, i) ~= ":" then - decode_error(str, i, "expected ':' after key") - end - i = next_char(str, i + 1, space_chars, true) - -- Read value - val, i = parse(str, i) - -- Set - res[key] = val - -- Next token - i = next_char(str, i, space_chars, true) - local chr = str:sub(i, i) - i = i + 1 - if chr == "}" then break end - if chr ~= "," then decode_error(str, i, "expected '}' or ','") end - end - return res, i -end - - -local char_func_map = { - [ '"' ] = parse_string, - [ "0" ] = parse_number, - [ "1" ] = parse_number, - [ "2" ] = parse_number, - [ "3" ] = parse_number, - [ "4" ] = parse_number, - [ "5" ] = parse_number, - [ "6" ] = parse_number, - [ "7" ] = parse_number, - [ "8" ] = parse_number, - [ "9" ] = parse_number, - [ "-" ] = parse_number, - [ "t" ] = parse_literal, - [ "f" ] = parse_literal, - [ "n" ] = parse_literal, - [ "[" ] = parse_array, - [ "{" ] = parse_object, -} - - -parse = function(str, idx) - local chr = str:sub(idx, idx) - local f = char_func_map[chr] - if f then - return f(str, idx) - end - decode_error(str, idx, "unexpected character '" .. chr .. "'") -end - - -function json.decode(str) - if type(str) ~= "string" then - error("expected argument of type string, got " .. 
type(str)) - end - local res, idx = parse(str, next_char(str, 1, space_chars, true)) - idx = next_char(str, idx, space_chars, true) - if idx <= #str then - decode_error(str, idx, "trailing garbage") - end - return res -end - - -return json \ No newline at end of file diff --git a/examples/minecraft/packet_handler/json.lua b/examples/minecraft/packet_handler/json.lua deleted file mode 100644 index 54d44484..00000000 --- a/examples/minecraft/packet_handler/json.lua +++ /dev/null @@ -1,388 +0,0 @@ --- --- json.lua --- --- Copyright (c) 2020 rxi --- --- Permission is hereby granted, free of charge, to any person obtaining a copy of --- this software and associated documentation files (the "Software"), to deal in --- the Software without restriction, including without limitation the rights to --- use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies --- of the Software, and to permit persons to whom the Software is furnished to do --- so, subject to the following conditions: --- --- The above copyright notice and this permission notice shall be included in all --- copies or substantial portions of the Software. --- --- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR --- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, --- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE --- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER --- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, --- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE --- SOFTWARE. 
--- - -local json = { _version = "0.1.2" } - -------------------------------------------------------------------------------- --- Encode -------------------------------------------------------------------------------- - -local encode - -local escape_char_map = { - [ "\\" ] = "\\", - [ "\"" ] = "\"", - [ "\b" ] = "b", - [ "\f" ] = "f", - [ "\n" ] = "n", - [ "\r" ] = "r", - [ "\t" ] = "t", -} - -local escape_char_map_inv = { [ "/" ] = "/" } -for k, v in pairs(escape_char_map) do - escape_char_map_inv[v] = k -end - - -local function escape_char(c) - return "\\" .. (escape_char_map[c] or string.format("u%04x", c:byte())) -end - - -local function encode_nil(val) - return "null" -end - - -local function encode_table(val, stack) - local res = {} - stack = stack or {} - - -- Circular reference? - if stack[val] then error("circular reference") end - - stack[val] = true - - if rawget(val, 1) ~= nil or next(val) == nil then - -- Treat as array -- check keys are valid and it is not sparse - local n = 0 - for k in pairs(val) do - if type(k) ~= "number" then - error("invalid table: mixed or invalid key types") - end - n = n + 1 - end - if n ~= #val then - error("invalid table: sparse array") - end - -- Encode - for i, v in ipairs(val) do - table.insert(res, encode(v, stack)) - end - stack[val] = nil - return "[" .. table.concat(res, ",") .. "]" - - else - -- Treat as an object - for k, v in pairs(val) do - if type(k) ~= "string" then - error("invalid table: mixed or invalid key types") - end - table.insert(res, encode(k, stack) .. ":" .. encode(v, stack)) - end - stack[val] = nil - return "{" .. table.concat(res, ",") .. "}" - end -end - - -local function encode_string(val) - return '"' .. val:gsub('[%z\1-\31\\"]', escape_char) .. '"' -end - - -local function encode_number(val) - -- Check for NaN, -inf and inf - if val ~= val or val <= -math.huge or val >= math.huge then - error("unexpected number value '" .. tostring(val) .. 
"'") - end - return string.format("%.14g", val) -end - - -local type_func_map = { - [ "nil" ] = encode_nil, - [ "table" ] = encode_table, - [ "string" ] = encode_string, - [ "number" ] = encode_number, - [ "boolean" ] = tostring, -} - - -encode = function(val, stack) - local t = type(val) - local f = type_func_map[t] - if f then - return f(val, stack) - end - error("unexpected type '" .. t .. "'") -end - - -function json.encode(val) - return ( encode(val) ) -end - - -------------------------------------------------------------------------------- --- Decode -------------------------------------------------------------------------------- - -local parse - -local function create_set(...) - local res = {} - for i = 1, select("#", ...) do - res[ select(i, ...) ] = true - end - return res -end - -local space_chars = create_set(" ", "\t", "\r", "\n") -local delim_chars = create_set(" ", "\t", "\r", "\n", "]", "}", ",") -local escape_chars = create_set("\\", "/", '"', "b", "f", "n", "r", "t", "u") -local literals = create_set("true", "false", "null") - -local literal_map = { - [ "true" ] = true, - [ "false" ] = false, - [ "null" ] = nil, -} - - -local function next_char(str, idx, set, negate) - for i = idx, #str do - if set[str:sub(i, i)] ~= negate then - return i - end - end - return #str + 1 -end - - -local function decode_error(str, idx, msg) - local line_count = 1 - local col_count = 1 - for i = 1, idx - 1 do - col_count = col_count + 1 - if str:sub(i, i) == "\n" then - line_count = line_count + 1 - col_count = 1 - end - end - error( string.format("%s at line %d col %d", msg, line_count, col_count) ) -end - - -local function codepoint_to_utf8(n) - -- http://scripts.sil.org/cms/scripts/page.php?site_id=nrsi&id=iws-appendixa - local f = math.floor - if n <= 0x7f then - return string.char(n) - elseif n <= 0x7ff then - return string.char(f(n / 64) + 192, n % 64 + 128) - elseif n <= 0xffff then - return string.char(f(n / 4096) + 224, f(n % 4096 / 64) + 128, n % 64 + 128) - 
elseif n <= 0x10ffff then - return string.char(f(n / 262144) + 240, f(n % 262144 / 4096) + 128, - f(n % 4096 / 64) + 128, n % 64 + 128) - end - error( string.format("invalid unicode codepoint '%x'", n) ) -end - - -local function parse_unicode_escape(s) - local n1 = tonumber( s:sub(1, 4), 16 ) - local n2 = tonumber( s:sub(7, 10), 16 ) - -- Surrogate pair? - if n2 then - return codepoint_to_utf8((n1 - 0xd800) * 0x400 + (n2 - 0xdc00) + 0x10000) - else - return codepoint_to_utf8(n1) - end -end - - -local function parse_string(str, i) - local res = "" - local j = i + 1 - local k = j - - while j <= #str do - local x = str:byte(j) - - if x < 32 then - decode_error(str, j, "control character in string") - - elseif x == 92 then -- `\`: Escape - res = res .. str:sub(k, j - 1) - j = j + 1 - local c = str:sub(j, j) - if c == "u" then - local hex = str:match("^[dD][89aAbB]%x%x\\u%x%x%x%x", j + 1) - or str:match("^%x%x%x%x", j + 1) - or decode_error(str, j - 1, "invalid unicode escape in string") - res = res .. parse_unicode_escape(hex) - j = j + #hex - else - if not escape_chars[c] then - decode_error(str, j - 1, "invalid escape char '" .. c .. "' in string") - end - res = res .. escape_char_map_inv[c] - end - k = j + 1 - - elseif x == 34 then -- `"`: End of string - res = res .. str:sub(k, j - 1) - return res, j + 1 - end - - j = j + 1 - end - - decode_error(str, i, "expected closing quote for string") -end - - -local function parse_number(str, i) - local x = next_char(str, i, delim_chars) - local s = str:sub(i, x - 1) - local n = tonumber(s) - if not n then - decode_error(str, i, "invalid number '" .. s .. "'") - end - return n, x -end - - -local function parse_literal(str, i) - local x = next_char(str, i, delim_chars) - local word = str:sub(i, x - 1) - if not literals[word] then - decode_error(str, i, "invalid literal '" .. word .. 
"'") - end - return literal_map[word], x -end - - -local function parse_array(str, i) - local res = {} - local n = 1 - i = i + 1 - while 1 do - local x - i = next_char(str, i, space_chars, true) - -- Empty / end of array? - if str:sub(i, i) == "]" then - i = i + 1 - break - end - -- Read token - x, i = parse(str, i) - res[n] = x - n = n + 1 - -- Next token - i = next_char(str, i, space_chars, true) - local chr = str:sub(i, i) - i = i + 1 - if chr == "]" then break end - if chr ~= "," then decode_error(str, i, "expected ']' or ','") end - end - return res, i -end - - -local function parse_object(str, i) - local res = {} - i = i + 1 - while 1 do - local key, val - i = next_char(str, i, space_chars, true) - -- Empty / end of object? - if str:sub(i, i) == "}" then - i = i + 1 - break - end - -- Read key - if str:sub(i, i) ~= '"' then - decode_error(str, i, "expected string for key") - end - key, i = parse(str, i) - -- Read ':' delimiter - i = next_char(str, i, space_chars, true) - if str:sub(i, i) ~= ":" then - decode_error(str, i, "expected ':' after key") - end - i = next_char(str, i + 1, space_chars, true) - -- Read value - val, i = parse(str, i) - -- Set - res[key] = val - -- Next token - i = next_char(str, i, space_chars, true) - local chr = str:sub(i, i) - i = i + 1 - if chr == "}" then break end - if chr ~= "," then decode_error(str, i, "expected '}' or ','") end - end - return res, i -end - - -local char_func_map = { - [ '"' ] = parse_string, - [ "0" ] = parse_number, - [ "1" ] = parse_number, - [ "2" ] = parse_number, - [ "3" ] = parse_number, - [ "4" ] = parse_number, - [ "5" ] = parse_number, - [ "6" ] = parse_number, - [ "7" ] = parse_number, - [ "8" ] = parse_number, - [ "9" ] = parse_number, - [ "-" ] = parse_number, - [ "t" ] = parse_literal, - [ "f" ] = parse_literal, - [ "n" ] = parse_literal, - [ "[" ] = parse_array, - [ "{" ] = parse_object, -} - - -parse = function(str, idx) - local chr = str:sub(idx, idx) - local f = char_func_map[chr] - if f then 
- return f(str, idx) - end - decode_error(str, idx, "unexpected character '" .. chr .. "'") -end - - -function json.decode(str) - if type(str) ~= "string" then - error("expected argument of type string, got " .. type(str)) - end - local res, idx = parse(str, next_char(str, 1, space_chars, true)) - idx = next_char(str, idx, space_chars, true) - if idx <= #str then - decode_error(str, idx, "trailing garbage") - end - return res -end - - -return json \ No newline at end of file diff --git a/examples/minecraft/packet_handler/minecraft.lua b/examples/minecraft/packet_handler/minecraft.lua deleted file mode 100644 index b3c9902f..00000000 --- a/examples/minecraft/packet_handler/minecraft.lua +++ /dev/null @@ -1,262 +0,0 @@ -json = require("packet_handler/json") - -function string.fromhex(str) - return (str:gsub('..', function(cc) - return string.char(tonumber(cc, 16)) - end)) -end - -function string.tohex(str) - return (str:gsub('.', function(c) - return string.format('%02X', string.byte(c)) - end)) -end - --- Bitwise AND -local function band(a, b) - local result = 0 - local bitval = 1 - while a > 0 and b > 0 do - local abit = a % 2 - local bbit = b % 2 - if abit == 1 and bbit == 1 then - result = result + bitval - end - a = math.floor(a / 2) - b = math.floor(b / 2) - bitval = bitval * 2 - end - return result -end - --- Bitwise OR -local function bor(a, b) - local result = 0 - local bitval = 1 - while a > 0 or b > 0 do - local abit = a % 2 - local bbit = b % 2 - if abit == 1 or bbit == 1 then - result = result + bitval - end - a = math.floor(a / 2) - b = math.floor(b / 2) - bitval = bitval * 2 - end - return result -end - --- Right Shift -local function rshift(value, shift) - return math.floor(value / (2 ^ shift)) -end - --- Left Shift -local function lshift(value, shift) - return value * (2 ^ shift) -end - -function encodeLEB128(value) - local bytes = {} - repeat - local byte = band(value, 0x7F) - value = rshift(value, 7) - if value ~= 0 then - byte = bor(byte, 0x80) - 
end - table.insert(bytes, byte) - until value == 0 - return bytes -end - -function decodeLEB128(bytes) - local result = 0 - local shift = 0 - local bytesConsumed = 0 -- Track the number of bytes consumed - - for i, byte in ipairs(bytes) do - local value = band(byte, 0x7F) -- Get lower 7 bits - result = bor(result, lshift(value, shift)) -- Add it to result with the correct shift - bytesConsumed = bytesConsumed + 1 -- Increment the byte counter - if band(byte, 0x80) == 0 then -- If the highest bit is not set, we are done - break - end - shift = shift + 7 -- Move to the next group of 7 bits - end - - return result, bytesConsumed -- Return both the result and the number of bytes consumed -end - -function handle(ctx, data) - hex = string.tohex(data) - - debug_print("Received Packet: " .. hex) - - -- check if hex starts with 0x01 0x00 - if hex:sub(1, 4) == "FE01" then - debug_print("Received Legacy Ping Packet") - sendData(string.fromhex( - "ff002300a7003100000034003700000031002e0034002e0032000000410020004d0069006e006500630072006100660074002000530065007200760065007200000030000000320030")) - end - - local packetNo = 0 - - local maxLoops = 2 - - restBytes = data - - while hex ~= "" do - - queue = get_queue() - - hex = string.tohex(restBytes) - - debug_print("Remaining Bytes: " .. hex) - packetNo = packetNo + 1 - debug_print("Packet No: " .. packetNo) - - packetLength, bytesConsumed = decodeLEB128({string.byte(restBytes, 1, 1)}) - debug_print("Packet Length: " .. packetLength) - - -- cut of consumedBytes and read untul packetLength - packetWithLength = string.sub(restBytes, bytesConsumed + 1, packetLength + bytesConsumed) - - -- next varint is the packetid - packetId, bytesConsumed = decodeLEB128({string.byte(packetWithLength, 1, 1)}) - - debug_print("Packet ID: " .. packetId) - - packetWithLengthHex = string.tohex(packetWithLength) - - debug_print("Trimmed Packet: " .. 
packetWithLengthHex) - - -- make hex to the rest of the data - restBytes = string.sub(restBytes, packetLength + bytesConsumed + 1) - - debug_print("Rest Bytes: " .. string.tohex(restBytes)) - - if packetLength == 1 and packetId == 0 then - debug_print("Received Status Packet " .. packetWithLengthHex) - sendData(pingResponse()) - - -- check if second byte is 0x01 - elseif packetId == 1 then - debug_print("Received Ping Packet " .. packetWithLengthHex) - -- send same packet back - close(data) - -- login packet 0x20 0x00 - elseif packetId == 0 and packetWithLengthHex:sub(-2) == "02" then -- check for enum at the end - debug_print("Received Login Packet " .. packetWithLengthHex) - -- return - -- debug_print("Received Login Packet") - - sendData(disconnectResponse()) - -- sleep for a sec before closing - finish() - -- return - else - debug_print("Received unknown packet " .. packetWithLengthHex) - -- close("") - end - end -end - -function formatResponse(jsonObj) - local response = json.encode(jsonObj) - local responseBuffer = {string.byte(response, 1, -1)} - local additional = {0x00} - local responseBufferLength = encodeLEB128(#responseBuffer) - local packetLenthBuffer = encodeLEB128(#responseBuffer + #responseBufferLength + 1) - - local concatedBytes = {} - - for i = 1, #packetLenthBuffer do - table.insert(concatedBytes, packetLenthBuffer[i]) - end - - for i = 1, #additional do - table.insert(concatedBytes, additional[i]) - end - - for i = 1, #responseBufferLength do - table.insert(concatedBytes, responseBufferLength[i]) - end - - for i = 1, #responseBuffer do - table.insert(concatedBytes, responseBuffer[i]) - end - - -- convert back to string - local finalString = string.char(unpack(concatedBytes)) - - return finalString -end - -function pingResponse() - - local description = { - color = "red", - extra = {"\n", { - color = "gray", - extra = {{ - bold = true, - text = "HINT" - }, ":", " ", { - color = "white", - text = "Get free servers at:" - }, " ", { - color = 
"green", - text = "druid.gg" - }}, - text = "" - }}, - text = "This server is in standby." - } - - local obj = { - version = { - name = "§9🕐 Waiting...", - protocol = -1 - }, - description = description, - players = { - max = 0, - online = 1 - }, - favicon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAMAAACdt4HsAAAAAXNSR0IArs4c6QAAAMlQTFRFR3BM6ndq5Wxb3WBQ6HFi0EUvvVxI8IBzzTwm0EUv11RC3GBQ7X1w00w50EUv42pa1lRB3mNT4WZV0Ugz2VlH0ks22lpJ0ks332RU1VI/6XZo8oV4421e63Zn32JR0046ytvZ2FZEieHa5nBgb+fZFerZ1NrZDOrZDurZ1tjYQunZztrZO+jZruDZFOrZDOrZDOrZ6HVoDOrZ09rZ0cvJn+LZbebZi+PZkOPZC+rZ942B7Xpr9op98oR29Id67n1uz9vZH+rZjeTZHadAYQAAADl0Uk5TAOr9sP4WBv4CDXqV8kcf3m277CmGPaAzx1Pg8tD90lw3YxDx/mzTQ+aq/nYk/bT50NSS71SwxIbiWYkesQAABERJREFUeNqll2tfozgUxkshIeF+vxWoiNfRUaszuztDC7rf/0PtISAlpR1dfPLzTZLzz3POIUgXp0XD2PJUkGetfbT4fyJI9+xNsuqVbGx1beDPh7uKnazq7e+96lWSqj79XLihpKv691SrRPU/4YLGtsbCp9quNp5BPjreE1j4KYT9ZxPYDbQt7GObW9XwxxHqTUz/EB/a8hbC2+iVJpiRbUdpokE92RwbdVJQcjp+x3Ztay0N1iFClFLk6oqYMEa3thUKeqp74q7zLYjQdUzIgjBhGiqRBohOdaLjo/FIldm6FhWIEH4NG8pGHgiReywJagnd8eqwzCF0cTAhq/TIDt+stzAE79Rz76pAYKMW4ukZKJDr9nzldJcMIHSd3dloYiAWapCm8iu83ECrO00tIHEH87JojCfP78/O7u/x/pQw3bEcYCM9MKALANht9HH42d3Pn389PF9enw/bLNjWapf4vAUcyDCreaMGn91dfb/49gv09HxNegAS5ZohNIUHuGlrIHVH8bcv/0I40+MDEDoVYGEHkkXMZbAWYBIMjOJfIX7Qw3W/0YjkHSBqOTW4DFQNAElIhvxvX76z+MHDfU+AnUyJPwZQG7jjyv64er34NdbNZb/CvMJmYT0GGCkANAXvDbyCAU7vFkJTZgRNGQP8RAamTsYVeOPiH5/6KqD2LNiteWNALMCUaewBXAZcDjTtHajjJhSCLMvRtARTAAEAEwdYWABoRPwhgJWrkYcUeEAAgNMpPF0P5WLii7g+AJxzReS6AGcxCRZXxKQZAwi5ezlo4+Mz7i9NxeKbRB8DQrPhasD1kcsgTJsOwD/KKAcAdGGv9iq+jUvYG1AE2Amj4l8IWKyaxkRkNANJ7Ak3z+e9gahqmAT+OhMAN6VPRjOYvQ7euqfwso9HQdZ0Mn0eoJtVkymYmzu7vfrn4tvNDbxP+gWqJL0BlgF/HbPJJI5/3N39fXk5vBSRBcd0KteEBxClrCoz5Gf1IEYLMvBc7z2+ykQ0eWPnVVUqmLcV5J6PujnqFmJZNf0wdXIIwB5YyN3FQWWWqWrFuh4Xnlhm1btKDx/51xxl/QJPlcrSNM1SyqpBknjsQwdbZZWZOk81RKmaSLLDaTzrsVSVosFT/UiqMhhVto8/9ZlEQpYE5Qk6EDpl3XACLp7vu5llpoUPPKgOIDIIbSHLyOLy50ULJ5PMNTmoQ6zmzlICLR3bCunitAi1gJDH+MAZaj+7PU8pdJd+9I2ttIQ1nmRHEUIUk8WHQpYjSXlBF3
NFaGFKkqkgMhtB41ySnMDFswlYt5fSMorpbBPEDRww4bl4LgKakbcm1gh/IY3WhKjPRhDDa004wXwE1kWzQxhzEciynRYhFuHcx8JQGGKZe7FLZ3a0RbB7qIRzERbUorURWWhuQ9Zq5CyXS0dBs++HbwU5EKwv3FJDh2rk/uILoqFlT38O/QdGyOZnTVzZRwAAAABJRU5ErkJggg==" - } - - local snapshotMode = get_snapshot_mode() - local snapshotPercentage = get_snapshot_percentage() - - if snapshotMode ~= "noop" then - if snapshotMode == "restore" then - if snapshotPercentage == nil or snapshotPercentage == 100 then - obj.version.name = "§2▶ Extracting snapshot..." - else - obj.version.name = "§2▶ Downloading snapshot... " + string.format("%.2f", snapshotPercentage) + "%" - end - obj.description = "Restoring Minecraft Server, this might take a moment" - else - if snapshotPercentage == nil or snapshotPercentage == 100 then - obj.version.name = "§2▶ Backing up..." - else - obj.version.name = "§2▶ Backing up... " + string.format("%.2f", snapshotPercentage) + "%" - end - obj.description = "Backing up Minecraft Server, this might take a moment" - end - elseif queue ~= nil and queue["install"] == "running" then - obj.version.name = "§2▶ Installing..." - obj.description = "Installing Minecraft Server, this might take a moment" - elseif get_finish_sec() ~= nil then - obj.version.name = "§2▶ Starting..." - obj.description = "Starting " .. math.ceil(get_finish_sec()) .. "s" - end - - return formatResponse(obj) -end - -function disconnectResponse() - local obj = "Our super cool system will start now... 
please wait" - return formatResponse(obj) -end diff --git a/examples/minecraft/packet_handler/query.lua b/examples/minecraft/packet_handler/query.lua deleted file mode 100644 index 796fcedf..00000000 --- a/examples/minecraft/packet_handler/query.lua +++ /dev/null @@ -1,187 +0,0 @@ -function string.fromhex(str) - return (str:gsub('..', function(cc) - return string.char(tonumber(cc, 16)) - end)) -end - -function string.tohex(str) - return (str:gsub('.', function(c) - return string.format('%02X', string.byte(c)) - end)) -end - -function handle(ctx, data) - - -- prtocol begins with FFFFFFFF and the packedid - - -- get packet index - - -- check if start with FFFFFFFF - - hex = string.tohex(data) - - if string.sub(hex, 1, 8) ~= "FFFFFFFF" then - debug_print("Invalid Packet " .. hex) - return - end - - packetId = string.sub(hex, 9, 10) - - payload = string.sub(hex, 11) - - -- check if packet is 54 - - debug_print("Packet ID: " .. packetId) - - if packetId == "55" then - - if payload == "FFFFFFFF" or payload == "00000000" then - debug_print("Received Packet: " .. hex) - resHex = string.fromhex("FFFFFFFF414BA1D522") -- this is not good, as we allways pass the same key for the challenge - ctx.sendData(resHex) - return - end - - if payload == "4BA1D522" then - debug_print("Received Packet: " .. hex) - resHex = string.fromhex("FFFFFFFF4400") -- this is not good to be hardcoded, but fine for now - - ctx.sendData(resHex) - return - end - debug_print("Bad challenge: " .. hex) - return - end - - if packetId == "56" then - - if payload == "FFFFFFFF" or payload == "00000000" then - debug_print("Received Packet: " .. hex) - resHex = string.fromhex("FFFFFFFF414BA1D522") -- this is not good, as we allways pass the same key for the challenge - ctx.sendData(resHex) - return - end - - if payload == "4BA1D522" then - debug_print("Received Packet: " .. 
hex) - resHex = string.fromhex( - "FFFFFFFF451A00414C4C4F57444F574E4C4F414443484152535F69003100414C4C4F57444F574E4C4F41444954454D535F69003100436C757374657249645F73004B4150323032326E76637738393233386E3332726677653900435553544F4D5345525645524E414D455F73006B617020707670202F20342D6D616E202F2078352D783235202F20776F726B65727320667269656E646C79207365727665720044617954696D655F730037360047616D654D6F64655F73005465737447616D654D6F64655F43004841534143544956454D4F44535F690031004C45474143595F690030004D4154434854494D454F55545F66003132302E303030303030004D4F44305F7300323839373838353837383A4544393730443545343845324143433334333545374339373345434135373637004D4F44315F7300323536343534363435353A3934413336414236343933453241443335364631343142313932383633453445004D4F44325F7300333034363539363536343A3832453245393730343446444139463642464237353439443730433337423133004D4F44335F7300313939393434373137323A3836453432424644343646453430363338443639344141384342453634344134004D6F6449645F6C0030004E6574776F726B696E675F690030004E554D4F50454E505542434F4E4E003530004F4646494349414C5345525645525F690030004F574E494E474944003930323032313035363131373133353337004F574E494E474E414D45003930323032313035363131373133353337005032504144445200393032303231303536313137313335333700503250504F52540037373837005345415243484B4559574F5244535F7300437573746F6D0053657276657250617373776F72645F620066616C73650053455256455255534553424154544C4559455F6200747275650053455353494F4E464C41475300313730370053455353494F4E49535056455F69003000") -- this is not good to be hardcoded, but fine for now - - ctx.sendData(resHex) - return - end - debug_print("Bad challenge: " .. 
hex) - return - end - - if packetId == "54" then - - queue = get_queue() - name = get_var("ServerListName") or "Coldstarter is cool (server is idle, join to start)" - - map = get_var("MapName") or "server idle" - - local finishSec = get_finish_sec() - - if finishSec ~= nil then - finishSec = math.ceil(finishSec) - end - - if queue ~= nil and queue["install"] == "running" then - if finishSec ~= nil then - -- finish sec is not necissary applicable, but it's better to show something I guess - name = get_var("ServerListNameInstalling") or - string.format("INSTALLING, this might take a moment - %ds", finishSec) - else - name = get_var("ServerListNameInstalling") or "INSTALLING, this might take a moment" - end - - map = get_var("MapNameInstalling") or "installing server" - elseif finishSec ~= nil then - nameTemplate = get_var("ServerListNameStarting") or "Druid Gameserver (starting) - %ds" - name = string.format(nameTemplate, finishSec) - end - - folder = get_var("GameSteamFolder") or "ark_survival_evolved" - - gameName = get_var("GameName") or "ARK: Survival Evolved" - - steamIdString = get_var("GameSteamId") or "0" - - steamId = tonumber(steamIdString) - - serverPort = get_port("main") - - -- hex - nameHex = string.tohex(name) - - mapHex = string.tohex(map) - - folderHex = string.tohex(folder) -- ark: ark_survival_evolved - - steamIdHex = number_to_little_endian_short(steamId) - - gameHex = string.tohex(gameName) - - maxPlayerHex = "00" - playerHex = "00" - botHex = "00" - - serverTypeHex = "64" -- dedicated - - osHex = "6C" -- l (6C) for linux, w (77) for windows - - vacHex = "01" -- 01 for secure, 00 for insecure - - version = string.tohex("1.0.0.0") - - -- EDF & 0x80: Port - -- EDF & 0x10: SteamID - -- EDF & 0x20 Keywords - -- EDF & 0x01 GameID - - edfFlagHex = "B1" - - -- short as hex - gamePortHex = number_to_little_endian_short(serverPort) - - steamId = "01D075C44C764001" - - tags = - 
",OWNINGID:90202064633057281,OWNINGNAME:90202064633057281,NUMOPENPUBCONN:50,P2PADDR:90202064633057281,P2PPORT:" .. - serverPort .. ",LEGACY_i:0" - - tagsHex = string.tohex(tags) - - edfHex = gamePortHex .. steamId .. tagsHex .. "00" .. "FE47050000000000" - - res = - "FFFFFFFF4911" .. nameHex .. "00" .. mapHex .. "00" .. folderHex .. "00" .. gameHex .. "00" .. steamIdHex .. - playerHex .. maxPlayerHex .. botHex .. serverTypeHex .. osHex .. vacHex .. version .. "00" .. edfFlagHex .. - edfHex - - debug_print("Response length: " .. string.len(tags)) - - resHex = string.fromhex(res) - - ctx.sendData(resHex) - return - end - - debug_print("Unknown Packet: " .. hex) - -end - -function number_to_little_endian_short(num) - -- Ensure the number is in the 16-bit range for unsigned short - if num < 0 or num > 65535 then - error("Number " .. num .. " out of range for 16-bit unsigned short") - end - - -- Convert the number to two bytes in little-endian format - local low_byte = num % 256 -- Least significant byte - local high_byte = math.floor(num / 256) % 256 -- Most significant byte - - -- Format as hexadecimal string - return string.format("%02X%02X", low_byte, high_byte) -end diff --git a/examples/minecraft/scroll.yaml b/examples/minecraft/scroll.yaml index 5f9c3c3d..9a2662c9 100644 --- a/examples/minecraft/scroll.yaml +++ b/examples/minecraft/scroll.yaml @@ -1,78 +1,69 @@ -name: registry-1.docker.io/highcard/scroll-minecraft-spigot -desc: Minecraft Spigot -version: 0.0.1 +name: ghcr.io/druid-examples/minecraft +desc: Minecraft server using commands as runtime units +version: 0.1.0 app_version: 1.20.4 -keepAlivePPM: 5 + ports: - name: minecraft protocol: tcp port: 25565 - sleep_handler: packet_handler/minecraft.lua + sleep_handler: generic mandatory: true - - name: query - protocol: udp - sleep_handler: packet_handler/query.lua - vars: - - name: GameName - value: "ARK: Survival Evolved" - - name: GameSteamFolder - value: ark_survival_evolved - - name: GameSteamId - value: 
"0" - - name: MapName - value: server idle - - name: ServerListName - value: "⏸️ Druid Gameserver (idle) - Start server by joining" - - name: ServerListNameStarting - value: "▶️ Druid Gameserver (starting) - %ds" - - name: main - protocol: udp serve: "start" commands: + install: + run: once + procedures: + - image: eclipse-temurin:21-jre + mounts: + - path: /server + working_dir: /server + command: + - sh + - -c + - >- + curl -fsSL -o server.jar + https://piston-data.mojang.com/v1/objects/8dd1a28015f51b1803213892b50b7b4fc76e594d/server.jar + && echo eula=true > eula.txt + start: needs: [install] run: restart - dependencies: [jdk17] procedures: - - mode: exec - data: - - java - - -version - - mode: exec - id: start-process - data: + - id: coldstart + image: druid-coldstarter:local + expectedPorts: + - name: minecraft + keepAliveTraffic: 10kb/5m + mounts: + - path: /runtime + command: + - druid-coldstarter + - --runtime-config + - /runtime/.druid/runtime.json + - --status-file + - .coldstarter-finished.json + + - id: start + image: eclipse-temurin:21-jre + expectedPorts: + - name: minecraft + keepAliveTraffic: 10kb/5m + mounts: + - path: /server + working_dir: /server + command: - java - -Xmx1024M - -Xms1024M - -jar - - spigot.jar + - server.jar - nogui + stop: + run: always procedures: - - mode: stdin - data: - - start-process - - stop - install: - run: once - procedures: - - mode: exec - data: - - wget - - -O - - spigot.jar - - https://launcher.mojang.com/v1/objects/8dd1a28015f51b1803213892b50b7b4fc76e594d/server.jar - - mode: exec - data: - - bash - - -c - - echo eula=true > eula.txt - restart: - procedures: - - mode: command - data: - - stop - - mode: command - data: - - start + - type: signal + target: start + signal: SIGTERM diff --git a/examples/mysql/scroll.yaml b/examples/mysql/scroll.yaml new file mode 100644 index 00000000..67166f00 --- /dev/null +++ b/examples/mysql/scroll.yaml @@ -0,0 +1,47 @@ +name: ghcr.io/druid-examples/mysql +desc: MySQL server using 
commands as runtime units +version: 0.1.0 +app_version: "8.4" + +ports: + - name: mysql + protocol: tcp + port: 3306 + mandatory: true + +serve: "start" +commands: + start: + run: restart + procedures: + - image: mysql:8.4 + expectedPorts: + - name: mysql + keepAliveTraffic: 1b/5m + env: + MYSQL_DATABASE: app + MYSQL_USER: app + MYSQL_PASSWORD: from-secret:mysql-password + MYSQL_ROOT_PASSWORD: from-secret:mysql-root-password + mounts: + - path: /var/lib/mysql + sub_path: mysql + + backup: + run: always + procedures: + - image: mysql:8.4 + command: + - sh + - -c + - MYSQL_PWD=$(cat /run/secrets/mysql-root-password) mysqldump -h start -u root --all-databases > /backup/dump.sql + mounts: + - path: /backup + sub_path: backups + + stop: + run: always + procedures: + - type: signal + target: start + signal: SIGTERM diff --git a/examples/nginx/scroll-lock.json b/examples/nginx/scroll-lock.json deleted file mode 100644 index 59adcbf4..00000000 --- a/examples/nginx/scroll-lock.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "statuses": { "start": "running" }, - "scroll_version": "0.0.1", - "scroll_name": "registry-1.docker.io/highcard/scroll-nginx" -} diff --git a/examples/nginx/scroll.yaml b/examples/nginx/scroll.yaml deleted file mode 100644 index 0c18cc16..00000000 --- a/examples/nginx/scroll.yaml +++ /dev/null @@ -1,20 +0,0 @@ -name: registry-1.docker.io/highcard/scroll-nginx -desc: Nginx server -version: 0.0.1 -app_version: latest -serve: "start" -commands: - start: - # the nginx command detaches the process - #run: restart - procedures: - - mode: exec - data: - - nginx - stop: - procedures: - - mode: exec - data: - - nginx - - -s - - stop diff --git a/examples/scroll-cwd-pull/annotations.json b/examples/scroll-cwd-pull/annotations.json deleted file mode 100644 index 7385c20d..00000000 --- a/examples/scroll-cwd-pull/annotations.json +++ /dev/null @@ -1 +0,0 @@ -{"org.opencontainers.image.created":"2026-02-24T12:09:11Z"} \ No newline at end of file diff --git 
a/examples/scroll-cwd-pull/manifest.json b/examples/scroll-cwd-pull/manifest.json deleted file mode 100644 index 18bdd74a..00000000 --- a/examples/scroll-cwd-pull/manifest.json +++ /dev/null @@ -1 +0,0 @@ -{"mediaType":"application/vnd.oci.image.manifest.v1+json","digest":"sha256:6c9c78ae3f0395028791a99e7fe4990e7ff2d8e8ba07ecea7d46ba9894d3c48d","size":3673} \ No newline at end of file diff --git a/examples/scroll-cwd-pull/packet_handler/json.lua b/examples/scroll-cwd-pull/packet_handler/json.lua deleted file mode 100644 index 54d44484..00000000 --- a/examples/scroll-cwd-pull/packet_handler/json.lua +++ /dev/null @@ -1,388 +0,0 @@ --- --- json.lua --- --- Copyright (c) 2020 rxi --- --- Permission is hereby granted, free of charge, to any person obtaining a copy of --- this software and associated documentation files (the "Software"), to deal in --- the Software without restriction, including without limitation the rights to --- use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies --- of the Software, and to permit persons to whom the Software is furnished to do --- so, subject to the following conditions: --- --- The above copyright notice and this permission notice shall be included in all --- copies or substantial portions of the Software. --- --- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR --- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, --- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE --- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER --- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, --- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE --- SOFTWARE. 
--- - -local json = { _version = "0.1.2" } - -------------------------------------------------------------------------------- --- Encode -------------------------------------------------------------------------------- - -local encode - -local escape_char_map = { - [ "\\" ] = "\\", - [ "\"" ] = "\"", - [ "\b" ] = "b", - [ "\f" ] = "f", - [ "\n" ] = "n", - [ "\r" ] = "r", - [ "\t" ] = "t", -} - -local escape_char_map_inv = { [ "/" ] = "/" } -for k, v in pairs(escape_char_map) do - escape_char_map_inv[v] = k -end - - -local function escape_char(c) - return "\\" .. (escape_char_map[c] or string.format("u%04x", c:byte())) -end - - -local function encode_nil(val) - return "null" -end - - -local function encode_table(val, stack) - local res = {} - stack = stack or {} - - -- Circular reference? - if stack[val] then error("circular reference") end - - stack[val] = true - - if rawget(val, 1) ~= nil or next(val) == nil then - -- Treat as array -- check keys are valid and it is not sparse - local n = 0 - for k in pairs(val) do - if type(k) ~= "number" then - error("invalid table: mixed or invalid key types") - end - n = n + 1 - end - if n ~= #val then - error("invalid table: sparse array") - end - -- Encode - for i, v in ipairs(val) do - table.insert(res, encode(v, stack)) - end - stack[val] = nil - return "[" .. table.concat(res, ",") .. "]" - - else - -- Treat as an object - for k, v in pairs(val) do - if type(k) ~= "string" then - error("invalid table: mixed or invalid key types") - end - table.insert(res, encode(k, stack) .. ":" .. encode(v, stack)) - end - stack[val] = nil - return "{" .. table.concat(res, ",") .. "}" - end -end - - -local function encode_string(val) - return '"' .. val:gsub('[%z\1-\31\\"]', escape_char) .. '"' -end - - -local function encode_number(val) - -- Check for NaN, -inf and inf - if val ~= val or val <= -math.huge or val >= math.huge then - error("unexpected number value '" .. tostring(val) .. 
"'") - end - return string.format("%.14g", val) -end - - -local type_func_map = { - [ "nil" ] = encode_nil, - [ "table" ] = encode_table, - [ "string" ] = encode_string, - [ "number" ] = encode_number, - [ "boolean" ] = tostring, -} - - -encode = function(val, stack) - local t = type(val) - local f = type_func_map[t] - if f then - return f(val, stack) - end - error("unexpected type '" .. t .. "'") -end - - -function json.encode(val) - return ( encode(val) ) -end - - -------------------------------------------------------------------------------- --- Decode -------------------------------------------------------------------------------- - -local parse - -local function create_set(...) - local res = {} - for i = 1, select("#", ...) do - res[ select(i, ...) ] = true - end - return res -end - -local space_chars = create_set(" ", "\t", "\r", "\n") -local delim_chars = create_set(" ", "\t", "\r", "\n", "]", "}", ",") -local escape_chars = create_set("\\", "/", '"', "b", "f", "n", "r", "t", "u") -local literals = create_set("true", "false", "null") - -local literal_map = { - [ "true" ] = true, - [ "false" ] = false, - [ "null" ] = nil, -} - - -local function next_char(str, idx, set, negate) - for i = idx, #str do - if set[str:sub(i, i)] ~= negate then - return i - end - end - return #str + 1 -end - - -local function decode_error(str, idx, msg) - local line_count = 1 - local col_count = 1 - for i = 1, idx - 1 do - col_count = col_count + 1 - if str:sub(i, i) == "\n" then - line_count = line_count + 1 - col_count = 1 - end - end - error( string.format("%s at line %d col %d", msg, line_count, col_count) ) -end - - -local function codepoint_to_utf8(n) - -- http://scripts.sil.org/cms/scripts/page.php?site_id=nrsi&id=iws-appendixa - local f = math.floor - if n <= 0x7f then - return string.char(n) - elseif n <= 0x7ff then - return string.char(f(n / 64) + 192, n % 64 + 128) - elseif n <= 0xffff then - return string.char(f(n / 4096) + 224, f(n % 4096 / 64) + 128, n % 64 + 128) - 
elseif n <= 0x10ffff then - return string.char(f(n / 262144) + 240, f(n % 262144 / 4096) + 128, - f(n % 4096 / 64) + 128, n % 64 + 128) - end - error( string.format("invalid unicode codepoint '%x'", n) ) -end - - -local function parse_unicode_escape(s) - local n1 = tonumber( s:sub(1, 4), 16 ) - local n2 = tonumber( s:sub(7, 10), 16 ) - -- Surrogate pair? - if n2 then - return codepoint_to_utf8((n1 - 0xd800) * 0x400 + (n2 - 0xdc00) + 0x10000) - else - return codepoint_to_utf8(n1) - end -end - - -local function parse_string(str, i) - local res = "" - local j = i + 1 - local k = j - - while j <= #str do - local x = str:byte(j) - - if x < 32 then - decode_error(str, j, "control character in string") - - elseif x == 92 then -- `\`: Escape - res = res .. str:sub(k, j - 1) - j = j + 1 - local c = str:sub(j, j) - if c == "u" then - local hex = str:match("^[dD][89aAbB]%x%x\\u%x%x%x%x", j + 1) - or str:match("^%x%x%x%x", j + 1) - or decode_error(str, j - 1, "invalid unicode escape in string") - res = res .. parse_unicode_escape(hex) - j = j + #hex - else - if not escape_chars[c] then - decode_error(str, j - 1, "invalid escape char '" .. c .. "' in string") - end - res = res .. escape_char_map_inv[c] - end - k = j + 1 - - elseif x == 34 then -- `"`: End of string - res = res .. str:sub(k, j - 1) - return res, j + 1 - end - - j = j + 1 - end - - decode_error(str, i, "expected closing quote for string") -end - - -local function parse_number(str, i) - local x = next_char(str, i, delim_chars) - local s = str:sub(i, x - 1) - local n = tonumber(s) - if not n then - decode_error(str, i, "invalid number '" .. s .. "'") - end - return n, x -end - - -local function parse_literal(str, i) - local x = next_char(str, i, delim_chars) - local word = str:sub(i, x - 1) - if not literals[word] then - decode_error(str, i, "invalid literal '" .. word .. 
"'") - end - return literal_map[word], x -end - - -local function parse_array(str, i) - local res = {} - local n = 1 - i = i + 1 - while 1 do - local x - i = next_char(str, i, space_chars, true) - -- Empty / end of array? - if str:sub(i, i) == "]" then - i = i + 1 - break - end - -- Read token - x, i = parse(str, i) - res[n] = x - n = n + 1 - -- Next token - i = next_char(str, i, space_chars, true) - local chr = str:sub(i, i) - i = i + 1 - if chr == "]" then break end - if chr ~= "," then decode_error(str, i, "expected ']' or ','") end - end - return res, i -end - - -local function parse_object(str, i) - local res = {} - i = i + 1 - while 1 do - local key, val - i = next_char(str, i, space_chars, true) - -- Empty / end of object? - if str:sub(i, i) == "}" then - i = i + 1 - break - end - -- Read key - if str:sub(i, i) ~= '"' then - decode_error(str, i, "expected string for key") - end - key, i = parse(str, i) - -- Read ':' delimiter - i = next_char(str, i, space_chars, true) - if str:sub(i, i) ~= ":" then - decode_error(str, i, "expected ':' after key") - end - i = next_char(str, i + 1, space_chars, true) - -- Read value - val, i = parse(str, i) - -- Set - res[key] = val - -- Next token - i = next_char(str, i, space_chars, true) - local chr = str:sub(i, i) - i = i + 1 - if chr == "}" then break end - if chr ~= "," then decode_error(str, i, "expected '}' or ','") end - end - return res, i -end - - -local char_func_map = { - [ '"' ] = parse_string, - [ "0" ] = parse_number, - [ "1" ] = parse_number, - [ "2" ] = parse_number, - [ "3" ] = parse_number, - [ "4" ] = parse_number, - [ "5" ] = parse_number, - [ "6" ] = parse_number, - [ "7" ] = parse_number, - [ "8" ] = parse_number, - [ "9" ] = parse_number, - [ "-" ] = parse_number, - [ "t" ] = parse_literal, - [ "f" ] = parse_literal, - [ "n" ] = parse_literal, - [ "[" ] = parse_array, - [ "{" ] = parse_object, -} - - -parse = function(str, idx) - local chr = str:sub(idx, idx) - local f = char_func_map[chr] - if f then 
- return f(str, idx) - end - decode_error(str, idx, "unexpected character '" .. chr .. "'") -end - - -function json.decode(str) - if type(str) ~= "string" then - error("expected argument of type string, got " .. type(str)) - end - local res, idx = parse(str, next_char(str, 1, space_chars, true)) - idx = next_char(str, idx, space_chars, true) - if idx <= #str then - decode_error(str, idx, "trailing garbage") - end - return res -end - - -return json \ No newline at end of file diff --git a/examples/scroll-cwd-pull/packet_handler/minecraft.lua b/examples/scroll-cwd-pull/packet_handler/minecraft.lua deleted file mode 100644 index 34bcfa9b..00000000 --- a/examples/scroll-cwd-pull/packet_handler/minecraft.lua +++ /dev/null @@ -1,262 +0,0 @@ -json = require("packet_handler/json") - -function string.fromhex(str) - return (str:gsub('..', function(cc) - return string.char(tonumber(cc, 16)) - end)) -end - -function string.tohex(str) - return (str:gsub('.', function(c) - return string.format('%02X', string.byte(c)) - end)) -end - --- Bitwise AND -local function band(a, b) - local result = 0 - local bitval = 1 - while a > 0 and b > 0 do - local abit = a % 2 - local bbit = b % 2 - if abit == 1 and bbit == 1 then - result = result + bitval - end - a = math.floor(a / 2) - b = math.floor(b / 2) - bitval = bitval * 2 - end - return result -end - --- Bitwise OR -local function bor(a, b) - local result = 0 - local bitval = 1 - while a > 0 or b > 0 do - local abit = a % 2 - local bbit = b % 2 - if abit == 1 or bbit == 1 then - result = result + bitval - end - a = math.floor(a / 2) - b = math.floor(b / 2) - bitval = bitval * 2 - end - return result -end - --- Right Shift -local function rshift(value, shift) - return math.floor(value / (2 ^ shift)) -end - --- Left Shift -local function lshift(value, shift) - return value * (2 ^ shift) -end - -function encodeLEB128(value) - local bytes = {} - repeat - local byte = band(value, 0x7F) - value = rshift(value, 7) - if value ~= 0 then - byte = 
bor(byte, 0x80) - end - table.insert(bytes, byte) - until value == 0 - return bytes -end - -function decodeLEB128(bytes) - local result = 0 - local shift = 0 - local bytesConsumed = 0 -- Track the number of bytes consumed - - for i, byte in ipairs(bytes) do - local value = band(byte, 0x7F) -- Get lower 7 bits - result = bor(result, lshift(value, shift)) -- Add it to result with the correct shift - bytesConsumed = bytesConsumed + 1 -- Increment the byte counter - if band(byte, 0x80) == 0 then -- If the highest bit is not set, we are done - break - end - shift = shift + 7 -- Move to the next group of 7 bits - end - - return result, bytesConsumed -- Return both the result and the number of bytes consumed -end - -function handle(ctx, data) - hex = string.tohex(data) - - debug_print("Received Packet: " .. hex) - - -- check if hex starts with 0x01 0x00 - if hex:sub(1, 4) == "FE01" then - debug_print("Received Legacy Ping Packet") - sendData(string.fromhex( - "ff002300a7003100000034003700000031002e0034002e0032000000410020004d0069006e006500630072006100660074002000530065007200760065007200000030000000320030")) - end - - local packetNo = 0 - - local maxLoops = 2 - - restBytes = data - - while hex ~= "" do - - queue = get_queue() - - hex = string.tohex(restBytes) - - debug_print("Remaining Bytes: " .. hex) - packetNo = packetNo + 1 - debug_print("Packet No: " .. packetNo) - - packetLength, bytesConsumed = decodeLEB128({string.byte(restBytes, 1, 1)}) - debug_print("Packet Length: " .. packetLength) - - -- cut of consumedBytes and read untul packetLength - packetWithLength = string.sub(restBytes, bytesConsumed + 1, packetLength + bytesConsumed) - - -- next varint is the packetid - packetId, bytesConsumed = decodeLEB128({string.byte(packetWithLength, 1, 1)}) - - debug_print("Packet ID: " .. packetId) - - packetWithLengthHex = string.tohex(packetWithLength) - - debug_print("Trimmed Packet: " .. 
packetWithLengthHex) - - -- make hex to the rest of the data - restBytes = string.sub(restBytes, packetLength + bytesConsumed + 1) - - debug_print("Rest Bytes: " .. string.tohex(restBytes)) - - if packetLength == 1 and packetId == 0 then - debug_print("Received Status Packet " .. packetWithLengthHex) - sendData(pingResponse()) - - -- check if second byte is 0x01 - elseif packetId == 1 then - debug_print("Received Ping Packet " .. packetWithLengthHex) - -- send same packet back - close(data) - -- login packet 0x20 0x00 - elseif packetId == 0 and packetWithLengthHex:sub(-2) == "02" then -- check for enum at the end - debug_print("Received Login Packet " .. packetWithLengthHex) - -- return - -- debug_print("Received Login Packet") - - sendData(disconnectResponse()) - -- sleep for a sec before closing - finish() - -- return - else - debug_print("Received unknown packet " .. packetWithLengthHex) - -- close("") - end - end -end - -function formatResponse(jsonObj) - local response = json.encode(jsonObj) - local responseBuffer = {string.byte(response, 1, -1)} - local additional = {0x00} - local responseBufferLength = encodeLEB128(#responseBuffer) - local packetLenthBuffer = encodeLEB128(#responseBuffer + #responseBufferLength + 1) - - local concatedBytes = {} - - for i = 1, #packetLenthBuffer do - table.insert(concatedBytes, packetLenthBuffer[i]) - end - - for i = 1, #additional do - table.insert(concatedBytes, additional[i]) - end - - for i = 1, #responseBufferLength do - table.insert(concatedBytes, responseBufferLength[i]) - end - - for i = 1, #responseBuffer do - table.insert(concatedBytes, responseBuffer[i]) - end - - -- convert back to string - local finalString = string.char(unpack(concatedBytes)) - - return finalString -end - -function pingResponse() - - local description = { - color = "red", - extra = {"\n", { - color = "gray", - extra = {{ - bold = true, - text = "HINT" - }, ":", " ", { - color = "white", - text = "Get free servers at:" - }, " ", { - color = 
"green", - text = "druid.gg" - }}, - text = "" - }}, - text = "This server is in standby." - } - - local obj = { - version = { - name = "§9🕐 Waiting...", - protocol = -1 - }, - description = description, - players = { - max = 0, - online = 1 - }, - favicon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAMAAACdt4HsAAAAAXNSR0IArs4c6QAAAMlQTFRFR3BM6ndq5Wxb3WBQ6HFi0EUvvVxI8IBzzTwm0EUv11RC3GBQ7X1w00w50EUv42pa1lRB3mNT4WZV0Ugz2VlH0ks22lpJ0ks332RU1VI/6XZo8oV4421e63Zn32JR0046ytvZ2FZEieHa5nBgb+fZFerZ1NrZDOrZDurZ1tjYQunZztrZO+jZruDZFOrZDOrZDOrZ6HVoDOrZ09rZ0cvJn+LZbebZi+PZkOPZC+rZ942B7Xpr9op98oR29Id67n1uz9vZH+rZjeTZHadAYQAAADl0Uk5TAOr9sP4WBv4CDXqV8kcf3m277CmGPaAzx1Pg8tD90lw3YxDx/mzTQ+aq/nYk/bT50NSS71SwxIbiWYkesQAABERJREFUeNqll2tfozgUxkshIeF+vxWoiNfRUaszuztDC7rf/0PtISAlpR1dfPLzTZLzz3POIUgXp0XD2PJUkGetfbT4fyJI9+xNsuqVbGx1beDPh7uKnazq7e+96lWSqj79XLihpKv691SrRPU/4YLGtsbCp9quNp5BPjreE1j4KYT9ZxPYDbQt7GObW9XwxxHqTUz/EB/a8hbC2+iVJpiRbUdpokE92RwbdVJQcjp+x3Ztay0N1iFClFLk6oqYMEa3thUKeqp74q7zLYjQdUzIgjBhGiqRBohOdaLjo/FIldm6FhWIEH4NG8pGHgiReywJagnd8eqwzCF0cTAhq/TIDt+stzAE79Rz76pAYKMW4ukZKJDr9nzldJcMIHSd3dloYiAWapCm8iu83ECrO00tIHEH87JojCfP78/O7u/x/pQw3bEcYCM9MKALANht9HH42d3Pn389PF9enw/bLNjWapf4vAUcyDCreaMGn91dfb/49gv09HxNegAS5ZohNIUHuGlrIHVH8bcv/0I40+MDEDoVYGEHkkXMZbAWYBIMjOJfIX7Qw3W/0YjkHSBqOTW4DFQNAElIhvxvX76z+MHDfU+AnUyJPwZQG7jjyv64er34NdbNZb/CvMJmYT0GGCkANAXvDbyCAU7vFkJTZgRNGQP8RAamTsYVeOPiH5/6KqD2LNiteWNALMCUaewBXAZcDjTtHajjJhSCLMvRtARTAAEAEwdYWABoRPwhgJWrkYcUeEAAgNMpPF0P5WLii7g+AJxzReS6AGcxCRZXxKQZAwi5ezlo4+Mz7i9NxeKbRB8DQrPhasD1kcsgTJsOwD/KKAcAdGGv9iq+jUvYG1AE2Amj4l8IWKyaxkRkNANJ7Ak3z+e9gahqmAT+OhMAN6VPRjOYvQ7euqfwso9HQdZ0Mn0eoJtVkymYmzu7vfrn4tvNDbxP+gWqJL0BlgF/HbPJJI5/3N39fXk5vBSRBcd0KteEBxClrCoz5Gf1IEYLMvBc7z2+ykQ0eWPnVVUqmLcV5J6PujnqFmJZNf0wdXIIwB5YyN3FQWWWqWrFuh4Xnlhm1btKDx/51xxl/QJPlcrSNM1SyqpBknjsQwdbZZWZOk81RKmaSLLDaTzrsVSVosFT/UiqMhhVto8/9ZlEQpYE5Qk6EDpl3XACLp7vu5llpoUPPKgOIDIIbSHLyOLy50ULJ5PMNTmoQ6zmzlICLR3bCunitAi1gJDH+MAZaj+7PU8pdJd+9I2ttIQ1nmRHEUIUk8WHQpYjSXlBF3
NFaGFKkqkgMhtB41ySnMDFswlYt5fSMorpbBPEDRww4bl4LgKakbcm1gh/IY3WhKjPRhDDa004wXwE1kWzQxhzEciynRYhFuHcx8JQGGKZe7FLZ3a0RbB7qIRzERbUorURWWhuQ9Zq5CyXS0dBs++HbwU5EKwv3FJDh2rk/uILoqFlT38O/QdGyOZnTVzZRwAAAABJRU5ErkJggg==" - } - - local snapshotMode = get_snapshot_mode() - local snapshotPercentage = get_snapshot_percentage() - - if snapshotMode ~= "noop" then - if snapshotMode == "restore" then - if snapshotPercentage == nil or snapshotPercentage == 100 then - obj.version.name = "§2▶ Downloading snapshot... " + format("%.2f", snapshotPercentage) + "%" - else - obj.version.name = "§2▶ Extracting snapshot..." - end - obj.description = "Restoring Minecraft Server, this might take a moment" - else - if snapshotPercentage == nil or snapshotPercentage == 100 then - obj.version.name = "§2▶ Backing up... " + format("%.2f", snapshotPercentage) + "%" - else - obj.version.name = "§2▶ Backing up..." - end - obj.description = "Backing up Minecraft Server, this might take a moment" - end - elseif queue ~= nil and queue["install"] == "running" then - obj.version.name = "§2▶ Installing..." - obj.description = "Installing Minecraft Server, this might take a moment" - elseif get_finish_sec() ~= nil then - obj.version.name = "§2▶ Starting..." - obj.description = "Starting " .. math.ceil(get_finish_sec()) .. "s" - end - - return formatResponse(obj) -end - -function disconnectResponse() - local obj = "Our super cool system will start now... 
please wait" - return formatResponse(obj) -end diff --git a/examples/scroll-cwd-pull/scroll.yaml b/examples/scroll-cwd-pull/scroll.yaml deleted file mode 100644 index 027fb473..00000000 --- a/examples/scroll-cwd-pull/scroll.yaml +++ /dev/null @@ -1,72 +0,0 @@ -name: artifacts.druid.gg/druid-team/scroll-minecraft-forge -desc: Minecraft Forge -version: 0.0.1 -app_version: 1.20.1 -ports: - - name: main - protocol: tcp - port: 25565 - sleep_handler: packet_handler/minecraft.lua - start_delay: 10 - finish_after_command: install - - name: rcon - protocol: tcp - port: 25575 -serve: "start" -commands: - start: - needs: [install] - run: restart - dependencies: [jdk17] - procedures: - - mode: exec - data: - - sh - - ./update_user_args.sh - - mode: exec - data: - - sh - - ./run.sh - stop: - procedures: - - mode: rcon - data: stop - install: - run: once - dependencies: [wget, cacert, jdk17] - procedures: - - mode: exec - data: - - wget - - -q - - -O - - forge-installer.jar - - http://192.168.100.200:9000/snapshot-cache/minecraft/forge/forge-1.20.1.jar - - mode: exec - data: - - java - - -jar - - forge-installer.jar - - --installServer - - mode: exec - data: - - rm - - forge-installer.jar - - mode: exec - data: - - bash - - -c - - echo eula=true > eula.txt - update: - procedures: - - mode: exec - data: - - sh - - $SCROLL_DIR/update.sh - - mode: exec - data: - - bash - - -c - - echo eula=true > eula.txt -plugins: - rcon: {} diff --git a/examples/scroll-cwd-pull/update/.gitkeep b/examples/scroll-cwd-pull/update/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/scroll-cwd/annotations.json b/examples/scroll-cwd/annotations.json deleted file mode 100644 index 243beb97..00000000 --- a/examples/scroll-cwd/annotations.json +++ /dev/null @@ -1 +0,0 @@ 
-{"gg.druid.scroll.image":"artifacts.druid.gg/druid-team/druid:stable-nix","gg.druid.scroll.minCpu":"0.25","gg.druid.scroll.minDisk":"3Gi","gg.druid.scroll.minRam":"512Mi","gg.druid.scroll.port.main":"25565","gg.druid.scroll.port.rcon":"25575","gg.druid.scroll.smart":"true","org.opencontainers.image.created":"2026-01-31T12:08:52Z"} \ No newline at end of file diff --git a/examples/scroll-cwd/manifest.json b/examples/scroll-cwd/manifest.json deleted file mode 100644 index 2b9e08ad..00000000 --- a/examples/scroll-cwd/manifest.json +++ /dev/null @@ -1 +0,0 @@ -{"mediaType":"application/vnd.oci.image.manifest.v1+json","digest":"sha256:d43488edeca23bc6fe8a29df14743012d961def04fd2e0c6e34fa98a92ec6d2e","size":2758} \ No newline at end of file diff --git a/examples/scroll-cwd/packet_handler/json.lua b/examples/scroll-cwd/packet_handler/json.lua deleted file mode 100644 index 54d44484..00000000 --- a/examples/scroll-cwd/packet_handler/json.lua +++ /dev/null @@ -1,388 +0,0 @@ --- --- json.lua --- --- Copyright (c) 2020 rxi --- --- Permission is hereby granted, free of charge, to any person obtaining a copy of --- this software and associated documentation files (the "Software"), to deal in --- the Software without restriction, including without limitation the rights to --- use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies --- of the Software, and to permit persons to whom the Software is furnished to do --- so, subject to the following conditions: --- --- The above copyright notice and this permission notice shall be included in all --- copies or substantial portions of the Software. --- --- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR --- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, --- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE --- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER --- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, --- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE --- SOFTWARE. --- - -local json = { _version = "0.1.2" } - -------------------------------------------------------------------------------- --- Encode -------------------------------------------------------------------------------- - -local encode - -local escape_char_map = { - [ "\\" ] = "\\", - [ "\"" ] = "\"", - [ "\b" ] = "b", - [ "\f" ] = "f", - [ "\n" ] = "n", - [ "\r" ] = "r", - [ "\t" ] = "t", -} - -local escape_char_map_inv = { [ "/" ] = "/" } -for k, v in pairs(escape_char_map) do - escape_char_map_inv[v] = k -end - - -local function escape_char(c) - return "\\" .. (escape_char_map[c] or string.format("u%04x", c:byte())) -end - - -local function encode_nil(val) - return "null" -end - - -local function encode_table(val, stack) - local res = {} - stack = stack or {} - - -- Circular reference? - if stack[val] then error("circular reference") end - - stack[val] = true - - if rawget(val, 1) ~= nil or next(val) == nil then - -- Treat as array -- check keys are valid and it is not sparse - local n = 0 - for k in pairs(val) do - if type(k) ~= "number" then - error("invalid table: mixed or invalid key types") - end - n = n + 1 - end - if n ~= #val then - error("invalid table: sparse array") - end - -- Encode - for i, v in ipairs(val) do - table.insert(res, encode(v, stack)) - end - stack[val] = nil - return "[" .. table.concat(res, ",") .. "]" - - else - -- Treat as an object - for k, v in pairs(val) do - if type(k) ~= "string" then - error("invalid table: mixed or invalid key types") - end - table.insert(res, encode(k, stack) .. ":" .. encode(v, stack)) - end - stack[val] = nil - return "{" .. table.concat(res, ",") .. "}" - end -end - - -local function encode_string(val) - return '"' .. 
val:gsub('[%z\1-\31\\"]', escape_char) .. '"' -end - - -local function encode_number(val) - -- Check for NaN, -inf and inf - if val ~= val or val <= -math.huge or val >= math.huge then - error("unexpected number value '" .. tostring(val) .. "'") - end - return string.format("%.14g", val) -end - - -local type_func_map = { - [ "nil" ] = encode_nil, - [ "table" ] = encode_table, - [ "string" ] = encode_string, - [ "number" ] = encode_number, - [ "boolean" ] = tostring, -} - - -encode = function(val, stack) - local t = type(val) - local f = type_func_map[t] - if f then - return f(val, stack) - end - error("unexpected type '" .. t .. "'") -end - - -function json.encode(val) - return ( encode(val) ) -end - - -------------------------------------------------------------------------------- --- Decode -------------------------------------------------------------------------------- - -local parse - -local function create_set(...) - local res = {} - for i = 1, select("#", ...) do - res[ select(i, ...) 
] = true - end - return res -end - -local space_chars = create_set(" ", "\t", "\r", "\n") -local delim_chars = create_set(" ", "\t", "\r", "\n", "]", "}", ",") -local escape_chars = create_set("\\", "/", '"', "b", "f", "n", "r", "t", "u") -local literals = create_set("true", "false", "null") - -local literal_map = { - [ "true" ] = true, - [ "false" ] = false, - [ "null" ] = nil, -} - - -local function next_char(str, idx, set, negate) - for i = idx, #str do - if set[str:sub(i, i)] ~= negate then - return i - end - end - return #str + 1 -end - - -local function decode_error(str, idx, msg) - local line_count = 1 - local col_count = 1 - for i = 1, idx - 1 do - col_count = col_count + 1 - if str:sub(i, i) == "\n" then - line_count = line_count + 1 - col_count = 1 - end - end - error( string.format("%s at line %d col %d", msg, line_count, col_count) ) -end - - -local function codepoint_to_utf8(n) - -- http://scripts.sil.org/cms/scripts/page.php?site_id=nrsi&id=iws-appendixa - local f = math.floor - if n <= 0x7f then - return string.char(n) - elseif n <= 0x7ff then - return string.char(f(n / 64) + 192, n % 64 + 128) - elseif n <= 0xffff then - return string.char(f(n / 4096) + 224, f(n % 4096 / 64) + 128, n % 64 + 128) - elseif n <= 0x10ffff then - return string.char(f(n / 262144) + 240, f(n % 262144 / 4096) + 128, - f(n % 4096 / 64) + 128, n % 64 + 128) - end - error( string.format("invalid unicode codepoint '%x'", n) ) -end - - -local function parse_unicode_escape(s) - local n1 = tonumber( s:sub(1, 4), 16 ) - local n2 = tonumber( s:sub(7, 10), 16 ) - -- Surrogate pair? - if n2 then - return codepoint_to_utf8((n1 - 0xd800) * 0x400 + (n2 - 0xdc00) + 0x10000) - else - return codepoint_to_utf8(n1) - end -end - - -local function parse_string(str, i) - local res = "" - local j = i + 1 - local k = j - - while j <= #str do - local x = str:byte(j) - - if x < 32 then - decode_error(str, j, "control character in string") - - elseif x == 92 then -- `\`: Escape - res = res .. 
str:sub(k, j - 1) - j = j + 1 - local c = str:sub(j, j) - if c == "u" then - local hex = str:match("^[dD][89aAbB]%x%x\\u%x%x%x%x", j + 1) - or str:match("^%x%x%x%x", j + 1) - or decode_error(str, j - 1, "invalid unicode escape in string") - res = res .. parse_unicode_escape(hex) - j = j + #hex - else - if not escape_chars[c] then - decode_error(str, j - 1, "invalid escape char '" .. c .. "' in string") - end - res = res .. escape_char_map_inv[c] - end - k = j + 1 - - elseif x == 34 then -- `"`: End of string - res = res .. str:sub(k, j - 1) - return res, j + 1 - end - - j = j + 1 - end - - decode_error(str, i, "expected closing quote for string") -end - - -local function parse_number(str, i) - local x = next_char(str, i, delim_chars) - local s = str:sub(i, x - 1) - local n = tonumber(s) - if not n then - decode_error(str, i, "invalid number '" .. s .. "'") - end - return n, x -end - - -local function parse_literal(str, i) - local x = next_char(str, i, delim_chars) - local word = str:sub(i, x - 1) - if not literals[word] then - decode_error(str, i, "invalid literal '" .. word .. "'") - end - return literal_map[word], x -end - - -local function parse_array(str, i) - local res = {} - local n = 1 - i = i + 1 - while 1 do - local x - i = next_char(str, i, space_chars, true) - -- Empty / end of array? - if str:sub(i, i) == "]" then - i = i + 1 - break - end - -- Read token - x, i = parse(str, i) - res[n] = x - n = n + 1 - -- Next token - i = next_char(str, i, space_chars, true) - local chr = str:sub(i, i) - i = i + 1 - if chr == "]" then break end - if chr ~= "," then decode_error(str, i, "expected ']' or ','") end - end - return res, i -end - - -local function parse_object(str, i) - local res = {} - i = i + 1 - while 1 do - local key, val - i = next_char(str, i, space_chars, true) - -- Empty / end of object? 
- if str:sub(i, i) == "}" then - i = i + 1 - break - end - -- Read key - if str:sub(i, i) ~= '"' then - decode_error(str, i, "expected string for key") - end - key, i = parse(str, i) - -- Read ':' delimiter - i = next_char(str, i, space_chars, true) - if str:sub(i, i) ~= ":" then - decode_error(str, i, "expected ':' after key") - end - i = next_char(str, i + 1, space_chars, true) - -- Read value - val, i = parse(str, i) - -- Set - res[key] = val - -- Next token - i = next_char(str, i, space_chars, true) - local chr = str:sub(i, i) - i = i + 1 - if chr == "}" then break end - if chr ~= "," then decode_error(str, i, "expected '}' or ','") end - end - return res, i -end - - -local char_func_map = { - [ '"' ] = parse_string, - [ "0" ] = parse_number, - [ "1" ] = parse_number, - [ "2" ] = parse_number, - [ "3" ] = parse_number, - [ "4" ] = parse_number, - [ "5" ] = parse_number, - [ "6" ] = parse_number, - [ "7" ] = parse_number, - [ "8" ] = parse_number, - [ "9" ] = parse_number, - [ "-" ] = parse_number, - [ "t" ] = parse_literal, - [ "f" ] = parse_literal, - [ "n" ] = parse_literal, - [ "[" ] = parse_array, - [ "{" ] = parse_object, -} - - -parse = function(str, idx) - local chr = str:sub(idx, idx) - local f = char_func_map[chr] - if f then - return f(str, idx) - end - decode_error(str, idx, "unexpected character '" .. chr .. "'") -end - - -function json.decode(str) - if type(str) ~= "string" then - error("expected argument of type string, got " .. 
type(str)) - end - local res, idx = parse(str, next_char(str, 1, space_chars, true)) - idx = next_char(str, idx, space_chars, true) - if idx <= #str then - decode_error(str, idx, "trailing garbage") - end - return res -end - - -return json \ No newline at end of file diff --git a/examples/scroll-cwd/packet_handler/minecraft.lua b/examples/scroll-cwd/packet_handler/minecraft.lua deleted file mode 100644 index 34bcfa9b..00000000 --- a/examples/scroll-cwd/packet_handler/minecraft.lua +++ /dev/null @@ -1,262 +0,0 @@ -json = require("packet_handler/json") - -function string.fromhex(str) - return (str:gsub('..', function(cc) - return string.char(tonumber(cc, 16)) - end)) -end - -function string.tohex(str) - return (str:gsub('.', function(c) - return string.format('%02X', string.byte(c)) - end)) -end - --- Bitwise AND -local function band(a, b) - local result = 0 - local bitval = 1 - while a > 0 and b > 0 do - local abit = a % 2 - local bbit = b % 2 - if abit == 1 and bbit == 1 then - result = result + bitval - end - a = math.floor(a / 2) - b = math.floor(b / 2) - bitval = bitval * 2 - end - return result -end - --- Bitwise OR -local function bor(a, b) - local result = 0 - local bitval = 1 - while a > 0 or b > 0 do - local abit = a % 2 - local bbit = b % 2 - if abit == 1 or bbit == 1 then - result = result + bitval - end - a = math.floor(a / 2) - b = math.floor(b / 2) - bitval = bitval * 2 - end - return result -end - --- Right Shift -local function rshift(value, shift) - return math.floor(value / (2 ^ shift)) -end - --- Left Shift -local function lshift(value, shift) - return value * (2 ^ shift) -end - -function encodeLEB128(value) - local bytes = {} - repeat - local byte = band(value, 0x7F) - value = rshift(value, 7) - if value ~= 0 then - byte = bor(byte, 0x80) - end - table.insert(bytes, byte) - until value == 0 - return bytes -end - -function decodeLEB128(bytes) - local result = 0 - local shift = 0 - local bytesConsumed = 0 -- Track the number of bytes consumed - - 
for i, byte in ipairs(bytes) do - local value = band(byte, 0x7F) -- Get lower 7 bits - result = bor(result, lshift(value, shift)) -- Add it to result with the correct shift - bytesConsumed = bytesConsumed + 1 -- Increment the byte counter - if band(byte, 0x80) == 0 then -- If the highest bit is not set, we are done - break - end - shift = shift + 7 -- Move to the next group of 7 bits - end - - return result, bytesConsumed -- Return both the result and the number of bytes consumed -end - -function handle(ctx, data) - hex = string.tohex(data) - - debug_print("Received Packet: " .. hex) - - -- check if hex starts with 0x01 0x00 - if hex:sub(1, 4) == "FE01" then - debug_print("Received Legacy Ping Packet") - sendData(string.fromhex( - "ff002300a7003100000034003700000031002e0034002e0032000000410020004d0069006e006500630072006100660074002000530065007200760065007200000030000000320030")) - end - - local packetNo = 0 - - local maxLoops = 2 - - restBytes = data - - while hex ~= "" do - - queue = get_queue() - - hex = string.tohex(restBytes) - - debug_print("Remaining Bytes: " .. hex) - packetNo = packetNo + 1 - debug_print("Packet No: " .. packetNo) - - packetLength, bytesConsumed = decodeLEB128({string.byte(restBytes, 1, 1)}) - debug_print("Packet Length: " .. packetLength) - - -- cut of consumedBytes and read untul packetLength - packetWithLength = string.sub(restBytes, bytesConsumed + 1, packetLength + bytesConsumed) - - -- next varint is the packetid - packetId, bytesConsumed = decodeLEB128({string.byte(packetWithLength, 1, 1)}) - - debug_print("Packet ID: " .. packetId) - - packetWithLengthHex = string.tohex(packetWithLength) - - debug_print("Trimmed Packet: " .. packetWithLengthHex) - - -- make hex to the rest of the data - restBytes = string.sub(restBytes, packetLength + bytesConsumed + 1) - - debug_print("Rest Bytes: " .. string.tohex(restBytes)) - - if packetLength == 1 and packetId == 0 then - debug_print("Received Status Packet " .. 
packetWithLengthHex) - sendData(pingResponse()) - - -- check if second byte is 0x01 - elseif packetId == 1 then - debug_print("Received Ping Packet " .. packetWithLengthHex) - -- send same packet back - close(data) - -- login packet 0x20 0x00 - elseif packetId == 0 and packetWithLengthHex:sub(-2) == "02" then -- check for enum at the end - debug_print("Received Login Packet " .. packetWithLengthHex) - -- return - -- debug_print("Received Login Packet") - - sendData(disconnectResponse()) - -- sleep for a sec before closing - finish() - -- return - else - debug_print("Received unknown packet " .. packetWithLengthHex) - -- close("") - end - end -end - -function formatResponse(jsonObj) - local response = json.encode(jsonObj) - local responseBuffer = {string.byte(response, 1, -1)} - local additional = {0x00} - local responseBufferLength = encodeLEB128(#responseBuffer) - local packetLenthBuffer = encodeLEB128(#responseBuffer + #responseBufferLength + 1) - - local concatedBytes = {} - - for i = 1, #packetLenthBuffer do - table.insert(concatedBytes, packetLenthBuffer[i]) - end - - for i = 1, #additional do - table.insert(concatedBytes, additional[i]) - end - - for i = 1, #responseBufferLength do - table.insert(concatedBytes, responseBufferLength[i]) - end - - for i = 1, #responseBuffer do - table.insert(concatedBytes, responseBuffer[i]) - end - - -- convert back to string - local finalString = string.char(unpack(concatedBytes)) - - return finalString -end - -function pingResponse() - - local description = { - color = "red", - extra = {"\n", { - color = "gray", - extra = {{ - bold = true, - text = "HINT" - }, ":", " ", { - color = "white", - text = "Get free servers at:" - }, " ", { - color = "green", - text = "druid.gg" - }}, - text = "" - }}, - text = "This server is in standby." 
- } - - local obj = { - version = { - name = "§9🕐 Waiting...", - protocol = -1 - }, - description = description, - players = { - max = 0, - online = 1 - }, - favicon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAMAAACdt4HsAAAAAXNSR0IArs4c6QAAAMlQTFRFR3BM6ndq5Wxb3WBQ6HFi0EUvvVxI8IBzzTwm0EUv11RC3GBQ7X1w00w50EUv42pa1lRB3mNT4WZV0Ugz2VlH0ks22lpJ0ks332RU1VI/6XZo8oV4421e63Zn32JR0046ytvZ2FZEieHa5nBgb+fZFerZ1NrZDOrZDurZ1tjYQunZztrZO+jZruDZFOrZDOrZDOrZ6HVoDOrZ09rZ0cvJn+LZbebZi+PZkOPZC+rZ942B7Xpr9op98oR29Id67n1uz9vZH+rZjeTZHadAYQAAADl0Uk5TAOr9sP4WBv4CDXqV8kcf3m277CmGPaAzx1Pg8tD90lw3YxDx/mzTQ+aq/nYk/bT50NSS71SwxIbiWYkesQAABERJREFUeNqll2tfozgUxkshIeF+vxWoiNfRUaszuztDC7rf/0PtISAlpR1dfPLzTZLzz3POIUgXp0XD2PJUkGetfbT4fyJI9+xNsuqVbGx1beDPh7uKnazq7e+96lWSqj79XLihpKv691SrRPU/4YLGtsbCp9quNp5BPjreE1j4KYT9ZxPYDbQt7GObW9XwxxHqTUz/EB/a8hbC2+iVJpiRbUdpokE92RwbdVJQcjp+x3Ztay0N1iFClFLk6oqYMEa3thUKeqp74q7zLYjQdUzIgjBhGiqRBohOdaLjo/FIldm6FhWIEH4NG8pGHgiReywJagnd8eqwzCF0cTAhq/TIDt+stzAE79Rz76pAYKMW4ukZKJDr9nzldJcMIHSd3dloYiAWapCm8iu83ECrO00tIHEH87JojCfP78/O7u/x/pQw3bEcYCM9MKALANht9HH42d3Pn389PF9enw/bLNjWapf4vAUcyDCreaMGn91dfb/49gv09HxNegAS5ZohNIUHuGlrIHVH8bcv/0I40+MDEDoVYGEHkkXMZbAWYBIMjOJfIX7Qw3W/0YjkHSBqOTW4DFQNAElIhvxvX76z+MHDfU+AnUyJPwZQG7jjyv64er34NdbNZb/CvMJmYT0GGCkANAXvDbyCAU7vFkJTZgRNGQP8RAamTsYVeOPiH5/6KqD2LNiteWNALMCUaewBXAZcDjTtHajjJhSCLMvRtARTAAEAEwdYWABoRPwhgJWrkYcUeEAAgNMpPF0P5WLii7g+AJxzReS6AGcxCRZXxKQZAwi5ezlo4+Mz7i9NxeKbRB8DQrPhasD1kcsgTJsOwD/KKAcAdGGv9iq+jUvYG1AE2Amj4l8IWKyaxkRkNANJ7Ak3z+e9gahqmAT+OhMAN6VPRjOYvQ7euqfwso9HQdZ0Mn0eoJtVkymYmzu7vfrn4tvNDbxP+gWqJL0BlgF/HbPJJI5/3N39fXk5vBSRBcd0KteEBxClrCoz5Gf1IEYLMvBc7z2+ykQ0eWPnVVUqmLcV5J6PujnqFmJZNf0wdXIIwB5YyN3FQWWWqWrFuh4Xnlhm1btKDx/51xxl/QJPlcrSNM1SyqpBknjsQwdbZZWZOk81RKmaSLLDaTzrsVSVosFT/UiqMhhVto8/9ZlEQpYE5Qk6EDpl3XACLp7vu5llpoUPPKgOIDIIbSHLyOLy50ULJ5PMNTmoQ6zmzlICLR3bCunitAi1gJDH+MAZaj+7PU8pdJd+9I2ttIQ1nmRHEUIUk8WHQpYjSXlBF3NFaGFKkqkgMhtB41ySnMDFswlYt5fSMorpbBPEDRww4bl4LgKakbcm1gh/IY3WhKjPRhDDa004wXwE1kWzQxhzEciyn
RYhFuHcx8JQGGKZe7FLZ3a0RbB7qIRzERbUorURWWhuQ9Zq5CyXS0dBs++HbwU5EKwv3FJDh2rk/uILoqFlT38O/QdGyOZnTVzZRwAAAABJRU5ErkJggg==" - } - - local snapshotMode = get_snapshot_mode() - local snapshotPercentage = get_snapshot_percentage() - - if snapshotMode ~= "noop" then - if snapshotMode == "restore" then - if snapshotPercentage == nil or snapshotPercentage == 100 then - obj.version.name = "§2▶ Downloading snapshot... " + format("%.2f", snapshotPercentage) + "%" - else - obj.version.name = "§2▶ Extracting snapshot..." - end - obj.description = "Restoring Minecraft Server, this might take a moment" - else - if snapshotPercentage == nil or snapshotPercentage == 100 then - obj.version.name = "§2▶ Backing up... " + format("%.2f", snapshotPercentage) + "%" - else - obj.version.name = "§2▶ Backing up..." - end - obj.description = "Backing up Minecraft Server, this might take a moment" - end - elseif queue ~= nil and queue["install"] == "running" then - obj.version.name = "§2▶ Installing..." - obj.description = "Installing Minecraft Server, this might take a moment" - elseif get_finish_sec() ~= nil then - obj.version.name = "§2▶ Starting..." - obj.description = "Starting " .. math.ceil(get_finish_sec()) .. "s" - end - - return formatResponse(obj) -end - -function disconnectResponse() - local obj = "Our super cool system will start now... 
please wait" - return formatResponse(obj) -end diff --git a/examples/scroll-cwd/scroll.yaml b/examples/scroll-cwd/scroll.yaml deleted file mode 100644 index 758006b5..00000000 --- a/examples/scroll-cwd/scroll.yaml +++ /dev/null @@ -1,72 +0,0 @@ -name: artifacts.druid.gg/druid-team/scroll-minecraft-forge -desc: Minecraft Forge -version: 0.0.1 -app_version: 1.20.1-test -ports: - - name: main - protocol: tcp - port: 25565 - sleep_handler: packet_handler/minecraft.lua - start_delay: 10 - finish_after_command: install - - name: rcon - protocol: tcp - port: 25575 -init: "start" -commands: - start: - needs: [install] - run: restart - dependencies: [jdk17] - procedures: - - mode: exec - data: - - sh - - ./update_user_args.sh - - mode: exec - data: - - sh - - ./run.sh - stop: - procedures: - - mode: rcon - data: stop - install: - run: once - dependencies: [wget, cacert, jdk17] - procedures: - - mode: exec - data: - - wget - - -q - - -O - - forge-installer.jar - - http://192.168.100.200:9000/snapshot-cache/minecraft/forge/forge-1.20.1.jar - - mode: exec - data: - - java - - -jar - - forge-installer.jar - - --installServer - - mode: exec - data: - - rm - - forge-installer.jar - - mode: exec - data: - - bash - - -c - - echo eula=true > eula.txt - update: - procedures: - - mode: exec - data: - - sh - - $SCROLL_DIR/update.sh - - mode: exec - data: - - bash - - -c - - echo eula=true > eula.txt -plugins: - rcon: {} diff --git a/examples/scroll-cwd/update/.gitkeep b/examples/scroll-cwd/update/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/static-web/scroll.yaml b/examples/static-web/scroll.yaml new file mode 100644 index 00000000..165593f9 --- /dev/null +++ b/examples/static-web/scroll.yaml @@ -0,0 +1,44 @@ +name: ghcr.io/druid-examples/static-web +desc: Static web server using commands as runtime units +version: 0.1.0 +app_version: "1.0" + +ports: + - name: http + protocol: http + port: 80 + mandatory: true + +serve: "start" +commands: + build: + 
run: once + procedures: + - image: alpine:3.20 + mounts: + - path: /site + command: + - sh + - -c + - >- + mkdir -p /site/public + && printf '

Druid static web example

\n' > /site/public/index.html + + start: + needs: [build] + run: restart + procedures: + - image: nginx:1.27-alpine + expectedPorts: + - name: http + keepAliveTraffic: 1b/5m + mounts: + - path: /usr/share/nginx/html + sub_path: public + + stop: + run: always + procedures: + - type: signal + target: start + signal: SIGTERM diff --git a/go.mod b/go.mod index ecd508d3..d00b2237 100644 --- a/go.mod +++ b/go.mod @@ -6,116 +6,134 @@ toolchain go1.24.7 require ( github.com/Masterminds/semver/v3 v3.2.1 - github.com/Masterminds/sprig v2.22.0+incompatible github.com/gofiber/contrib/websocket v1.3.4 github.com/gofiber/fiber/v2 v2.52.9 - github.com/hashicorp/go-plugin v1.6.1 - github.com/opencontainers/image-spec v1.1.0 - github.com/prometheus/client_golang v1.19.1 - github.com/shirou/gopsutil v3.21.11+incompatible - github.com/spf13/cobra v1.8.1 - github.com/spf13/viper v1.19.0 + github.com/opencontainers/image-spec v1.1.1 + github.com/spf13/cobra v1.9.1 + github.com/spf13/viper v1.20.1 go.uber.org/zap v1.27.0 gopkg.in/yaml.v3 v3.0.1 // indirect oras.land/oras-go/v2 v2.5.0 ) require ( - github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver v1.5.0 // indirect github.com/andybalholm/brotli v1.2.0 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/fasthttp/websocket v1.5.8 // indirect - github.com/fatih/color v1.15.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 - github.com/go-ole/go-ole v1.2.6 // indirect + github.com/fsnotify/fsnotify v1.9.0 github.com/go-openapi/jsonpointer v0.21.0 // indirect - github.com/go-openapi/swag v0.23.0 // indirect - github.com/golang/protobuf v1.5.4 // indirect + github.com/go-openapi/swag v0.23.1 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/hashicorp/go-hclog v1.6.3 - github.com/hashicorp/hcl v1.0.0 // indirect - github.com/hashicorp/yamux v0.1.1 // indirect - github.com/huandu/xstrings v1.4.0 // indirect - 
github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/joho/godotenv v1.5.1 github.com/josharian/intern v1.0.0 // indirect github.com/klauspost/compress v1.18.0 // indirect - github.com/magiconair/properties v1.8.7 // indirect - github.com/mailru/easyjson v0.7.7 // indirect + github.com/mailru/easyjson v0.9.0 // indirect github.com/mattn/go-colorable v0.1.14 github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/go-testing-interface v1.14.1 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/oklog/run v1.1.0 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.48.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/opencontainers/go-digest v1.0.0 github.com/rivo/uniseg v0.4.7 // indirect github.com/savsgio/gotils v0.0.0-20240303185622-093b76447511 // indirect - github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.6.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/afero v1.14.0 // indirect + github.com/spf13/cast v1.9.2 // indirect + github.com/spf13/pflag v1.0.7 // indirect github.com/subosito/gotenv v1.6.0 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasthttp v1.65.0 // indirect - github.com/yusufpapurcu/wmi v1.2.3 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.42.0 // indirect - golang.org/x/net v0.44.0 - golang.org/x/sync v0.17.0 // indirect - golang.org/x/sys v0.36.0 // indirect - golang.org/x/text v0.29.0 // indirect - google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240528184218-531527333157 // indirect - google.golang.org/grpc v1.65.0 - google.golang.org/protobuf v1.34.2 - gopkg.in/ini.v1 v1.67.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/text v0.31.0 // indirect + google.golang.org/protobuf v1.36.6 ) require ( + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect - github.com/gorilla/mux v1.8.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/containerd/errdefs v0.3.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/emicklei/go-restful/v3 v3.12.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fxamacker/cbor/v2 v2.8.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/gnostic-models v0.6.9 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/morikuni/aec v1.1.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/ncruces/go-strftime v1.0.0 // 
indirect github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect - github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/perimeterx/marshmallow v1.1.5 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/robfig/cron/v3 v3.0.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sagikazarmark/locafero v0.4.0 // indirect - github.com/sagikazarmark/slog-shim v0.1.0 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sagikazarmark/locafero v0.7.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect - github.com/stretchr/testify v1.11.1 // indirect github.com/woodsbury/decimal128 v1.3.0 // indirect - go.uber.org/atomic v1.9.0 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect + go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/time v0.12.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250721164621-a45f3dfb1074 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gotest.tools/v3 v3.5.2 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect + 
modernc.org/libc v1.61.13 // indirect + modernc.org/mathutil v1.7.1 // indirect + modernc.org/memory v1.11.0 // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect + sigs.k8s.io/yaml v1.5.0 // indirect ) require ( - al.essio.dev/pkg/shellescape v1.6.0 github.com/MicahParks/keyfunc v1.9.0 - github.com/creack/pty v1.1.21 + github.com/cilium/cilium v1.18.6 + github.com/docker/docker v28.3.3+incompatible + github.com/docker/go-connections v0.5.0 github.com/getkin/kin-openapi v0.133.0 github.com/go-co-op/gocron v1.37.0 - github.com/gofiber/jwt/v3 v3.3.10 github.com/golang-jwt/jwt/v4 v4.5.0 - github.com/gopacket/gopacket v1.2.0 - github.com/gorilla/websocket v1.5.3 - github.com/highcard-dev/gorcon v1.3.10 + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 github.com/oapi-codegen/runtime v1.1.2 github.com/otiai10/copy v1.14.0 - github.com/packetcap/go-pcap v0.0.0-20240528124601-8c87ecf5dbc5 github.com/yuin/gopher-lua v1.1.1 go.uber.org/mock v0.4.0 + google.golang.org/grpc v1.74.2 gopkg.in/yaml.v2 v2.4.0 + k8s.io/api v0.33.4 + k8s.io/apimachinery v0.33.4 + k8s.io/client-go v0.33.4 + modernc.org/sqlite v1.36.3 ) diff --git a/go.sum b/go.sum index 703a3a39..27ac2154 100644 --- a/go.sum +++ b/go.sum @@ -1,109 +1,114 @@ -al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA= -al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= -github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= -github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c 
h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/MicahParks/keyfunc v1.9.0 h1:lhKd5xrFHLNOWrDc4Tyb/Q1AJ4LCzQ48GVJyVIID3+o= github.com/MicahParks/keyfunc v1.9.0/go.mod h1:IdnCilugA0O/99dW+/MkvlyrsX8+L8+x95xuVNtM5jw= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= -github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ= github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY= github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= -github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= -github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 
-github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= -github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cilium/cilium v1.18.6 h1:acz5aRKhZbarCO1flx2vCP9wBh+lDc02uJgdqiTsJbA= +github.com/cilium/cilium v1.18.6/go.mod h1:mzpKpkILwP24adE975fTVdAojyy6C1tq7TDa9qZCWyo= +github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4= +github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= -github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= 
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI= +github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= +github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/fasthttp/websocket v1.5.8 h1:k5DpirKkftIF/w1R8ZzjSgARJrs54Je9YJK37DL/Ah8= github.com/fasthttp/websocket v1.5.8/go.mod h1:d08g8WaT6nnyvg9uMm8K9zMYyDjfKyj3170AtPRuVU0= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod 
h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU= +github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/getkin/kin-openapi v0.133.0 h1:pJdmNohVIJ97r4AUFtEXRXwESr8b0bD721u/Tz6k8PQ= github.com/getkin/kin-openapi v0.133.0/go.mod h1:boAciF6cXk5FhPqe/NQeBTeenbjqU4LhWBf09ILVvWE= github.com/go-co-op/gocron v1.37.0 h1:ZYDJGtQ4OMhTLKOKMIch+/CY70Brbb1dGdooLEhh7b0= github.com/go-co-op/gocron v1.37.0/go.mod h1:3L/n6BkO7ABj+TrfSVXLRzsP26zmikL4ISkLQ0O8iNY= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= 
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gofiber/contrib/websocket v1.3.4 h1:tWeBdbJ8q0WFQXariLN4dBIbGH9KBU75s0s7YXplOSg= github.com/gofiber/contrib/websocket v1.3.4/go.mod h1:kTFBPC6YENCnKfKx0BoOFjgXxdz7E85/STdkmZPEmPs= -github.com/gofiber/fiber/v2 v2.45.0/go.mod h1:DNl0/c37WLe0g92U6lx1VMQuxGUQY5V7EIaVoEsUffc= github.com/gofiber/fiber/v2 v2.52.9 h1:YjKl5DOiyP3j0mO61u3NTmK7or8GzzWzCFzkboyP5cw= github.com/gofiber/fiber/v2 v2.52.9/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw= -github.com/gofiber/jwt/v3 v3.3.10 h1:0bpWtFKaGepjwYTU4efHfy0o+matSqZwTxGMo5a+uuc= -github.com/gofiber/jwt/v3 v3.3.10/go.mod h1:GJorFVaDyfMPSK9RB8RG4NQ3s1oXKTmYaoL/ny08O1A= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf 
v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gopacket/gopacket v1.2.0 h1:eXbzFad7f73P1n2EJHQlsKuvIMJjVXK5tXoSca78I3A= -github.com/gopacket/gopacket v1.2.0/go.mod h1:BrAKEy5EOGQ76LSqh7DMAr7z0NNPdczWm2GxCG7+I8M= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= -github.com/gorilla/websocket 
v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= -github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-plugin v1.6.1 h1:P7MR2UP6gNKGPp+y7EZw2kOiq4IR9WiqLvp0XOsVdwI= -github.com/hashicorp/go-plugin v1.6.1/go.mod h1:XPHFku2tFo3o3QKFgSYo+cghcUhw1NA1hZyMK0PWAw0= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= -github.com/highcard-dev/gorcon v1.3.10 h1:U+TFuwbxj2HdUH+cxSIv7FJcI8xeo+OahD3Bt6wiCVA= -github.com/highcard-dev/gorcon v1.3.10/go.mod h1:aY9k0E7CrRRhGiktjoYnvfLFaWnUj+0VUhNSsTy5g/Y= -github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= -github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= -github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jhump/protoreflect v1.15.1 
h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= -github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= -github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -115,70 +120,66 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod 
h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= -github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= 
-github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/morikuni/aec v1.1.0 h1:vBBl0pUnvi/Je71dsRrhMBtreIqNMYErSAbEeb8jrXQ= +github.com/morikuni/aec v1.1.0/go.mod h1:xDRgiq/iw5l+zkao76YTKzKttOp2cwPEne25HDkJnBw= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 
h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= +github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/oapi-codegen/runtime v1.1.2 h1:P2+CubHq8fO4Q6fV1tqDBZHCwpVpvPg7oKiYzQgXIyI= github.com/oapi-codegen/runtime v1.1.2/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg= github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 h1:G7ERwszslrBzRxj//JalHPu/3yz+De2J+4aLtSRlHiY= github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037/go.mod h1:2bpvgLBZEtENV5scfDFEtB/5+1M4hkQhDQrccEJ/qGw= github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 h1:bQx3WeLcUWy+RletIKwUIt4x3t8n2SxavmoclizMb8c= github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/image-spec v1.1.1 
h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= -github.com/packetcap/go-pcap v0.0.0-20240528124601-8c87ecf5dbc5 h1:p4VuaitqUAqSZSomd7Wb4BPV/Jj7Hno2/iqtfX7DZJI= -github.com/packetcap/go-pcap v0.0.0-20240528124601-8c87ecf5dbc5/go.mod h1:zIAoVKeWP0mz4zXY50UYQt6NLg2uwKRswMDcGEqOms4= -github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= -github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= -github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= -github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -186,78 +187,82 @@ github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod 
h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= -github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= -github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= -github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= -github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94/go.mod h1:90zrgN3D/WJsDd1iXHT96alCoN2KJo6/4x1DZC3wZs8= -github.com/savsgio/gotils v0.0.0-20220530130905-52f3993e8d6d/go.mod h1:Gy+0tqhJvgGlqnTF8CVGP0AaGRjwBtXs/a5PA0Y3+A4= -github.com/savsgio/gotils v0.0.0-20230208104028-c358bd845dee/go.mod h1:qwtSXrKuJh/zsFQ12yEE89xfCrGKK63Rr7ctU/uCo4g= +github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= +github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= github.com/savsgio/gotils v0.0.0-20240303185622-093b76447511 h1:KanIMPX0QdEdB4R3CiimCAbxFrhB3j7h0/OvpYGVQa8= github.com/savsgio/gotils v0.0.0-20240303185622-093b76447511/go.mod h1:sM7Mt7uEoCeFSCBM+qBrqvEo+/9vdmj19wzp3yzUhmg= -github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= -github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spf13/afero v1.11.0 
h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= -github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= +github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= +github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= +github.com/spf13/cast v1.9.2 h1:SsGfm7M8QOFtEzumm7UZrZdLLquNdzFYfIbEXntcFbE= +github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= +github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= +github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= -github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= -github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= -github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= -github.com/tklauser/numcpus v0.6.1/go.mod 
h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.47.0/go.mod h1:k2zXd82h/7UZc3VOdJ2WaUqt1uZ/XpXAfE9i+HBC3lA= github.com/valyala/fasthttp v1.65.0 h1:j/u3uzFEGFfRxw79iYzJN+TteTJwbYkru9uDp3d0Yf8= github.com/valyala/fasthttp v1.65.0/go.mod h1:P/93/YkKPMsKSnATEeELUCkG8a7Y+k99uxNHVbKINr4= -github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= github.com/woodsbury/decimal128 v1.3.0 h1:8pffMNWIlC0O5vbyHWFZAt5yWvWcrHA+3ovIIjVWss0= github.com/woodsbury/decimal128 v1.3.0/go.mod h1:C5UTmyTjW3JftjUFzOVhC20BEQa2a4ZKOB5I6Zjb+ds= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= -github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= -github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= 
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= +go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= 
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= @@ -266,96 +271,125 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= -golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod 
h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= -golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= 
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= -golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= -golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text 
v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= -golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= +google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250721164621-a45f3dfb1074 h1:qJW29YvkiJmXOYMu5Tf8lyrTp3dOS+K4z6IixtLaCf8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250721164621-a45f3dfb1074/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= +google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +k8s.io/api v0.33.4 h1:oTzrFVNPXBjMu0IlpA2eDDIU49jsuEorGHB4cvKupkk= +k8s.io/api v0.33.4/go.mod h1:VHQZ4cuxQ9sCUMESJV5+Fe8bGnqAARZ08tSTdHWfeAc= +k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s= +k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/client-go v0.33.4 h1:TNH+CSu8EmXfitntjUPwaKVPN0AYMbc9F1bBS8/ABpw= +k8s.io/client-go v0.33.4/go.mod h1:LsA0+hBG2DPwovjd931L/AoaezMPX9CmBgyVyBZmbCY= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod 
h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +modernc.org/cc/v4 v4.24.4 h1:TFkx1s6dCkQpd6dKurBNmpo+G8Zl4Sq/ztJ+2+DEsh0= +modernc.org/cc/v4 v4.24.4/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= +modernc.org/ccgo/v4 v4.23.16 h1:Z2N+kk38b7SfySC1ZkpGLN2vthNJP1+ZzGZIlH7uBxo= +modernc.org/ccgo/v4 v4.23.16/go.mod h1:nNma8goMTY7aQZQNTyN9AIoJfxav4nvTnvKThAeMDdo= +modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE= +modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ= +modernc.org/gc/v2 v2.6.3 h1:aJVhcqAte49LF+mGveZ5KPlsp4tdGdAOT4sipJXADjw= +modernc.org/gc/v2 v2.6.3/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito= +modernc.org/libc v1.61.13 h1:3LRd6ZO1ezsFiX1y+bHd1ipyEHIJKvuprv0sLTBwLW8= +modernc.org/libc v1.61.13/go.mod h1:8F/uJWL/3nNil0Lgt1Dpz+GgkApWh04N3el3hxJcA6E= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= +modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= +modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= +modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= +modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= +modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= +modernc.org/sqlite v1.36.3 h1:qYMYlFR+rtLDUzuXoST1SDIdEPbX8xzuhdF90WsX1ss= +modernc.org/sqlite v1.36.3/go.mod h1:ADySlx7K4FdY5MaJcEv86hTJ0PjedAloTUuif0YS3ws= +modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= +modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= 
+modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c= oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI= +sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/yaml v1.5.0 h1:M10b2U7aEUY6hRtU870n2VTPgR5RZiL/I6Lcc2F4NUQ= +sigs.k8s.io/yaml v1.5.0/go.mod h1:wZs27Rbxoai4C0f8/9urLZtZtF3avA3gKvGyPdDqTO4= diff --git a/internal/api/generated.go b/internal/api/generated.go index 44c11c8d..30235cbb 100644 --- a/internal/api/generated.go +++ b/internal/api/generated.go @@ -6,9 +6,12 @@ package api import ( "bytes" "compress/gzip" + "context" "encoding/base64" "encoding/json" "fmt" + "io" + "net/http" "net/url" "path" "strings" @@ -19,730 +22,1098 @@ import ( "github.com/oapi-codegen/runtime" ) +// Defines values for RuntimeScrollStatus. const ( - BearerAuthScopes = "bearerAuth.Scopes" + Created RuntimeScrollStatus = "created" + Deleted RuntimeScrollStatus = "deleted" + Error RuntimeScrollStatus = "error" + Running RuntimeScrollStatus = "running" + Stopped RuntimeScrollStatus = "stopped" ) -// Defines values for AddPortRequestProtocol. 
-const ( - Tcp AddPortRequestProtocol = "tcp" - Udp AddPortRequestProtocol = "udp" -) +// CreateScrollRequest defines model for CreateScrollRequest. +type CreateScrollRequest struct { + // Artifact OCI artifact reference or local scroll path + Artifact string `json:"artifact"` -// Defines values for CommandInstructionSetRun. -const ( - Always CommandInstructionSetRun = "always" - Once CommandInstructionSetRun = "once" - Persistent CommandInstructionSetRun = "persistent" - Restart CommandInstructionSetRun = "restart" -) + // DataRoot Optional daemon-local path or backend ref containing runtime data directory. If omitted, a materializing runtime backend may pull the artifact. + DataRoot *string `json:"data_root,omitempty"` -// Defines values for ConsoleType. -const ( - ConsoleTypePlugin ConsoleType = "plugin" - ConsoleTypeProcess ConsoleType = "process" - ConsoleTypeTty ConsoleType = "tty" -) + // Id Deprecated alias for name. Optional local runtime scroll id/name. + Id *string `json:"id,omitempty"` -// Defines values for ScrollLockStatus. -const ( - Done ScrollLockStatus = "done" - Error ScrollLockStatus = "error" - Running ScrollLockStatus = "running" - Waiting ScrollLockStatus = "waiting" -) - -// AddPortRequest defines model for AddPortRequest. -type AddPortRequest struct { - // CheckActivity Whether to monitor port activity - CheckActivity *bool `json:"check_activity,omitempty"` + // Name Optional local runtime scroll id/name. If omitted, the daemon derives it from scroll.yaml name. + Name *string `json:"name,omitempty"` - // Description Optional port description - Description *string `json:"description,omitempty"` + // ScrollRoot Optional daemon-local path or backend ref containing scroll.yaml and scroll spec files. If omitted, a materializing runtime backend may pull the artifact. 
+ ScrollRoot *string `json:"scroll_root,omitempty"` +} - // Mandatory Whether this port must be open for health check - Mandatory *bool `json:"mandatory,omitempty"` +// DeletedScroll defines model for DeletedScroll. +type DeletedScroll struct { + Id string `json:"id"` + Status string `json:"status"` +} - // Name Port name/identifier - Name string `json:"name"` +// HealthResponse defines model for HealthResponse. +type HealthResponse struct { + // Mode Current health status mode + Mode string `json:"mode"` - // Port Port number (1-65535) - Port int `json:"port"` + // Progress Progress percentage for loading operations + Progress *float32 `json:"progress,omitempty"` - // Protocol Network protocol (tcp or udp) - Protocol AddPortRequestProtocol `json:"protocol"` + // StartDate When the daemon started + StartDate *time.Time `json:"start_date"` } -// AddPortRequestProtocol Network protocol (tcp or udp) -type AddPortRequestProtocol string - -// AugmentedPort defines model for AugmentedPort. -type AugmentedPort struct { - CheckActivity *bool `json:"check_activity,omitempty"` - Description *string `json:"description,omitempty"` - FinishAfterCommand *string `json:"finish_after_command,omitempty"` +// RuntimePortStatus defines model for RuntimePortStatus. 
+type RuntimePortStatus struct { + Bound bool `json:"bound"` + HostIp *string `json:"host_ip,omitempty"` + HostPort *int `json:"host_port,omitempty"` + KeepAliveTraffic *string `json:"keepAliveTraffic,omitempty"` + LastActivityAt *time.Time `json:"last_activity_at,omitempty"` + Name string `json:"name"` + Port int `json:"port"` + Procedure string `json:"procedure"` + Protocol string `json:"protocol"` + RxBytes *int64 `json:"rx_bytes,omitempty"` + Source string `json:"source"` + Traffic bool `json:"traffic"` + TrafficBytes *int64 `json:"traffic_bytes,omitempty"` + TrafficOk *bool `json:"traffic_ok,omitempty"` + TrafficWindow *string `json:"traffic_window,omitempty"` + TxBytes *int64 `json:"tx_bytes,omitempty"` +} + +// RuntimeScroll defines model for RuntimeScroll. +type RuntimeScroll struct { + Artifact string `json:"artifact"` + Commands *map[string]interface{} `json:"commands,omitempty"` + CreatedAt time.Time `json:"created_at"` + DataRoot string `json:"data_root"` + Id string `json:"id"` + OwnerId *string `json:"owner_id,omitempty"` + ScrollName string `json:"scroll_name"` + ScrollRoot string `json:"scroll_root"` + Status RuntimeScrollStatus `json:"status"` + UpdatedAt time.Time `json:"updated_at"` +} + +// RuntimeScrollStatus defines model for RuntimeScroll.Status. +type RuntimeScrollStatus string + +// CreateScrollJSONRequestBody defines body for CreateScroll for application/json ContentType. +type CreateScrollJSONRequestBody = CreateScrollRequest + +// RequestEditorFn is the function signature for the RequestEditor callback function +type RequestEditorFn func(ctx context.Context, req *http.Request) error + +// Doer performs HTTP requests. +// +// The standard http.Client implements this interface. +type HttpRequestDoer interface { + Do(req *http.Request) (*http.Response, error) +} - // InactiveSince When the port became inactive - InactiveSince time.Time `json:"inactive_since"` +// Client which conforms to the OpenAPI3 specification for this service. 
+type Client struct { + // The endpoint of the server conforming to this interface, with scheme, + // https://api.deepmap.com for example. This can contain a path relative + // to the server, such as https://api.deepmap.com/dev-test, and all the + // paths in the swagger spec will be appended to the server. + Server string - // InactiveSinceSec Seconds since port became inactive - InactiveSinceSec int `json:"inactive_since_sec"` - Mandatory *bool `json:"mandatory,omitempty"` + // Doer for performing requests, typically a *http.Client with any + // customized settings, such as certificate chains. + Client HttpRequestDoer - // Name Port name/identifier - Name string `json:"name"` + // A list of callbacks for modifying requests which are generated before sending over + // the network. + RequestEditors []RequestEditorFn +} - // Open Whether the port is currently open - Open bool `json:"open"` +// ClientOption allows setting custom parameters during construction +type ClientOption func(*Client) error - // Port Port number - Port int `json:"port"` +// Creates a new Client, with reasonable defaults +func NewClient(server string, opts ...ClientOption) (*Client, error) { + // create a client with sane default values + client := Client{ + Server: server, + } + // mutate client and add all optional params + for _, o := range opts { + if err := o(&client); err != nil { + return nil, err + } + } + // ensure the server URL always has a trailing slash + if !strings.HasSuffix(client.Server, "/") { + client.Server += "/" + } + // create httpClient, if not already present + if client.Client == nil { + client.Client = &http.Client{} + } + return &client, nil +} - // Protocol Network protocol - Protocol string `json:"protocol"` - SleepHandler *string `json:"sleep_handler"` - StartDelay *int `json:"start_delay,omitempty"` - Vars *[]ColdStarterVars `json:"vars,omitempty"` +// WithHTTPClient allows overriding the default Doer, which is +// automatically created using http.Client. 
This is useful for tests. +func WithHTTPClient(doer HttpRequestDoer) ClientOption { + return func(c *Client) error { + c.Client = doer + return nil + } } -// ColdStarterVars defines model for ColdStarterVars. -type ColdStarterVars struct { - Name string `json:"name"` - Value string `json:"value"` +// WithRequestEditorFn allows setting up a callback function, which will be +// called right before sending the request. This can be used to mutate the request. +func WithRequestEditorFn(fn RequestEditorFn) ClientOption { + return func(c *Client) error { + c.RequestEditors = append(c.RequestEditors, fn) + return nil + } } -// CommandInstructionSet defines model for CommandInstructionSet. -type CommandInstructionSet struct { - Dependencies *[]string `json:"dependencies,omitempty"` - Needs *[]string `json:"needs,omitempty"` - Procedures []Procedure `json:"procedures"` +// The interface specification for the client above. +type ClientInterface interface { + // GetHealthAuth request + GetHealthAuth(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) - // Run Run mode for the command - Run *CommandInstructionSetRun `json:"run,omitempty"` -} + // ListScrolls request + ListScrolls(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) -// CommandInstructionSetRun Run mode for the command -type CommandInstructionSetRun string + // CreateScrollWithBody request with any body + CreateScrollWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) -// Console defines model for Console. 
-type Console struct { - // Exit Exit code if console has exited - Exit *int `json:"exit"` + CreateScroll(ctx context.Context, body CreateScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) - // InputMode Input mode for the console - InputMode string `json:"inputMode"` + // DeleteScroll request + DeleteScroll(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) - // Type Console type - Type ConsoleType `json:"type"` -} + // GetScroll request + GetScroll(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) -// ConsoleType Console type -type ConsoleType string + // RunScrollCommand request + RunScrollCommand(ctx context.Context, id string, command string, reqEditors ...RequestEditorFn) (*http.Response, error) -// ConsolesResponse defines model for ConsolesResponse. -type ConsolesResponse struct { - Consoles map[string]Console `json:"consoles"` + // GetScrollPorts request + GetScrollPorts(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) } -// Cronjob defines model for Cronjob. -type Cronjob struct { - Command string `json:"command"` - Name string `json:"name"` - - // Schedule Cron schedule expression - Schedule string `json:"schedule"` +func (c *Client) GetHealthAuth(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGetHealthAuthRequest(c.Server) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) } -// ErrorResponse defines model for ErrorResponse. 
-type ErrorResponse struct { - // Error Error message - Error string `json:"error"` - Status string `json:"status"` +func (c *Client) ListScrolls(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewListScrollsRequest(c.Server) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) } -// HealthResponse defines model for HealthResponse. -type HealthResponse struct { - // Mode Current health status mode - Mode string `json:"mode"` +func (c *Client) CreateScrollWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewCreateScrollRequestWithBody(c.Server, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} - // Progress Progress percentage for loading operations - Progress *float32 `json:"progress,omitempty"` +func (c *Client) CreateScroll(ctx context.Context, body CreateScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewCreateScrollRequest(c.Server, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} - // StartDate When the daemon started - StartDate *time.Time `json:"start_date"` +func (c *Client) DeleteScroll(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewDeleteScrollRequest(c.Server, id) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) } -// Port defines model for Port. 
-type Port struct { - // CheckActivity Whether to monitor port activity - CheckActivity *bool `json:"check_activity,omitempty"` +func (c *Client) GetScroll(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGetScrollRequest(c.Server, id) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} - // Description Port description - Description *string `json:"description,omitempty"` +func (c *Client) RunScrollCommand(ctx context.Context, id string, command string, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewRunScrollCommandRequest(c.Server, id, command) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} - // FinishAfterCommand Command to run after port is available - FinishAfterCommand *string `json:"finish_after_command,omitempty"` +func (c *Client) GetScrollPorts(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGetScrollPortsRequest(c.Server, id) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} - // Mandatory Whether this port must be open for health check - Mandatory *bool `json:"mandatory,omitempty"` +// NewGetHealthAuthRequest generates requests for GetHealthAuth +func NewGetHealthAuthRequest(server string) (*http.Request, error) { + var err error - // Name Port name/identifier - Name string `json:"name"` + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } - // Port Port number - Port int `json:"port"` + operationPath := fmt.Sprintf("/api/v1/health") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } - // Protocol Network protocol - Protocol string `json:"protocol"` + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } - // SleepHandler Handler to call when port becomes inactive - SleepHandler *string `json:"sleep_handler"` + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } - // StartDelay Delay in seconds before starting port check - StartDelay *int `json:"start_delay,omitempty"` - Vars *[]ColdStarterVars `json:"vars,omitempty"` + return req, nil } -// Procedure defines model for Procedure. -type Procedure struct { - // Data Procedure data payload - Data interface{} `json:"data,omitempty"` +// NewListScrollsRequest generates requests for ListScrolls +func NewListScrollsRequest(server string) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/scrolls") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } - // Id Unique procedure identifier - Id *string `json:"id"` + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } - // IgnoreFailure Whether to continue on failure - IgnoreFailure *bool `json:"ignore_failure,omitempty"` + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } - // Mode Procedure execution mode - Mode string `json:"mode"` + return req, nil +} - // Wait Wait condition - Wait *Procedure_Wait `json:"wait,omitempty"` +// NewCreateScrollRequest calls the generic CreateScroll builder with application/json body +func NewCreateScrollRequest(server string, body CreateScrollJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewCreateScrollRequestWithBody(server, "application/json", bodyReader) } -// ProcedureWait0 defines model for . -type ProcedureWait0 = string +// NewCreateScrollRequestWithBody generates requests for CreateScroll with any type of body +func NewCreateScrollRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { + var err error -// ProcedureWait1 defines model for . -type ProcedureWait1 = int + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } -// ProcedureWait2 defines model for . -type ProcedureWait2 = bool + operationPath := fmt.Sprintf("/api/v1/scrolls") + if operationPath[0] == '/' { + operationPath = "." + operationPath + } -// Procedure_Wait Wait condition -type Procedure_Wait struct { - union json.RawMessage -} + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), body) + if err != nil { + return nil, err + } -// Process defines model for Process. 
-type Process struct { - // Name Process name/identifier - Name string `json:"name"` + req.Header.Add("Content-Type", contentType) - // Type Process type - Type string `json:"type"` + return req, nil } -// ProcessMonitorMetrics defines model for ProcessMonitorMetrics. -type ProcessMonitorMetrics struct { - // Connections Active network connections - Connections []string `json:"connections"` +// NewDeleteScrollRequest generates requests for DeleteScroll +func NewDeleteScrollRequest(server string, id string) (*http.Request, error) { + var err error + + var pathParam0 string - // Cpu CPU usage percentage - Cpu float64 `json:"cpu"` + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) + if err != nil { + return nil, err + } - // Memory Memory usage in bytes - Memory int `json:"memory"` + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/scrolls/%s", pathParam0) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } - // Pid Process ID - Pid int `json:"pid"` + req, err := http.NewRequest("DELETE", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil } -// ProcessTreeNode defines model for ProcessTreeNode. 
-type ProcessTreeNode struct { - Children *[]ProcessTreeNode `json:"children,omitempty"` - Cmdline *string `json:"cmdline,omitempty"` - CpuPercent *float64 `json:"cpu_percent,omitempty"` - Gids *[]int `json:"gids,omitempty"` +// NewGetScrollRequest generates requests for GetScroll +func NewGetScrollRequest(server string, id string) (*http.Request, error) { + var err error - // IoCounters I/O counters - IoCounters *string `json:"io_counters,omitempty"` + var pathParam0 string - // Memory Memory statistics - Memory *string `json:"memory,omitempty"` + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) + if err != nil { + return nil, err + } - // MemoryEx Extended memory statistics - MemoryEx *string `json:"memory_ex,omitempty"` - Name *string `json:"name,omitempty"` + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } - // Process Process information (simplified from gopsutil) - Process *string `json:"process,omitempty"` - Username *string `json:"username,omitempty"` -} + operationPath := fmt.Sprintf("/api/v1/scrolls/%s", pathParam0) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } -// ProcessTreeRoot defines model for ProcessTreeRoot. -type ProcessTreeRoot struct { - Root ProcessTreeNode `json:"root"` - TotalCpuPercent float64 `json:"total_cpu_percent"` - TotalIoCountersRead int64 `json:"total_io_counters_read"` - TotalIoCountersWrite int64 `json:"total_io_counters_write"` - TotalMemoryRss int64 `json:"total_memory_rss"` - TotalMemorySwap int64 `json:"total_memory_swap"` - TotalMemoryVms int64 `json:"total_memory_vms"` - TotalProcessCount int `json:"total_process_count"` -} + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } -// ProcessesResponse defines model for ProcessesResponse. 
-type ProcessesResponse struct { - Processes map[string]Process `json:"processes"` -} + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } -// QueueResponse Map of command IDs to their execution status -type QueueResponse map[string]ScrollLockStatus + return req, nil +} -// ScrollFile Scroll configuration file structure -type ScrollFile struct { - // AppVersion Application version (not necessarily semver) - AppVersion *string `json:"app_version,omitempty"` - Commands *map[string]CommandInstructionSet `json:"commands,omitempty"` - Cronjobs *[]Cronjob `json:"cronjobs,omitempty"` +// NewRunScrollCommandRequest generates requests for RunScrollCommand +func NewRunScrollCommandRequest(server string, id string, command string) (*http.Request, error) { + var err error - // Desc Scroll description - Desc *string `json:"desc,omitempty"` + var pathParam0 string - // Init Initialization command (deprecated, use serve) - // Deprecated: this property has been marked as deprecated upstream, but no `x-deprecated-reason` was set - Init *string `json:"init,omitempty"` + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) + if err != nil { + return nil, err + } - // KeepAlivePPM Keep alive packets per minute - KeepAlivePPM *int `json:"keepAlivePPM,omitempty"` + var pathParam1 string - // Name Scroll name - Name *string `json:"name,omitempty"` - Plugins *map[string]map[string]string `json:"plugins,omitempty"` - Ports *[]Port `json:"ports,omitempty"` + pathParam1, err = runtime.StyleParamWithLocation("simple", false, "command", runtime.ParamLocationPath, command) + if err != nil { + return nil, err + } - // Serve Serve command - Serve *string `json:"serve,omitempty"` + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } - // Version Scroll version (semver) - Version *string `json:"version,omitempty"` -} + operationPath := 
fmt.Sprintf("/api/v1/scrolls/%s/commands/%s", pathParam0, pathParam1) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } -// ScrollLockStatus Status of a command in the queue -type ScrollLockStatus string + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } -// ScrollLogStream defines model for ScrollLogStream. -type ScrollLogStream struct { - // Key The log stream identifier - Key string `json:"key"` + req, err := http.NewRequest("POST", queryURL.String(), nil) + if err != nil { + return nil, err + } - // Log Array of log lines - Log []string `json:"log"` + return req, nil } -// StartCommandRequest defines model for StartCommandRequest. -type StartCommandRequest struct { - // Command The command ID to execute - Command string `json:"command"` +// NewGetScrollPortsRequest generates requests for GetScrollPorts +func NewGetScrollPortsRequest(server string, id string) (*http.Request, error) { + var err error + + var pathParam0 string - // Sync Whether to run synchronously (wait for completion) - Sync *bool `json:"sync,omitempty"` -} + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) + if err != nil { + return nil, err + } -// StartProcedureRequest defines model for StartProcedureRequest. -type StartProcedureRequest struct { - // Data The data payload for the procedure - Data string `json:"data"` + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } - // Dependencies List of dependency IDs this procedure depends on - Dependencies *[]string `json:"dependencies,omitempty"` + operationPath := fmt.Sprintf("/api/v1/scrolls/%s/ports", pathParam0) + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } - // Mode The procedure mode (e.g., "stdin", or plugin mode) - Mode string `json:"mode"` + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } - // Process The process name to run the procedure against - Process string `json:"process"` + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } - // Sync Whether to run synchronously - Sync *bool `json:"sync,omitempty"` + return req, nil } -// TokenResponse defines model for TokenResponse. -type TokenResponse struct { - // Token The generated authentication token - Token string `json:"token"` +func (c *Client) applyEditors(ctx context.Context, req *http.Request, additionalEditors []RequestEditorFn) error { + for _, r := range c.RequestEditors { + if err := r(ctx, req); err != nil { + return err + } + } + for _, r := range additionalEditors { + if err := r(ctx, req); err != nil { + return err + } + } + return nil } -// WatchModeRequest defines model for WatchModeRequest. -type WatchModeRequest struct { - // HotReloadCommands Commands to run when files change - HotReloadCommands *[]string `json:"hotReloadCommands,omitempty"` - - // WatchPaths Directories to watch - WatchPaths []string `json:"watchPaths"` +// ClientWithResponses builds on ClientInterface to offer response payloads +type ClientWithResponses struct { + ClientInterface } -// WatchModeResponse defines model for WatchModeResponse. -type WatchModeResponse struct { - // Enabled Current watch mode state - Enabled bool `json:"enabled"` +// NewClientWithResponses creates a new ClientWithResponses, which wraps +// Client with return type handling +func NewClientWithResponses(server string, opts ...ClientOption) (*ClientWithResponses, error) { + client, err := NewClient(server, opts...) 
+ if err != nil { + return nil, err + } + return &ClientWithResponses{client}, nil +} - // Status Result status of the operation - Status string `json:"status"` +// WithBaseURL overrides the baseURL. +func WithBaseURL(baseURL string) ClientOption { + return func(c *Client) error { + newBaseURL, err := url.Parse(baseURL) + if err != nil { + return err + } + c.Server = newBaseURL.String() + return nil + } } -// WatchStatusResponse defines model for WatchStatusResponse. -type WatchStatusResponse struct { - // Enabled Whether watch mode is currently enabled - Enabled bool `json:"enabled"` +// ClientWithResponsesInterface is the interface specification for the client with responses above. +type ClientWithResponsesInterface interface { + // GetHealthAuthWithResponse request + GetHealthAuthWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*GetHealthAuthResponse, error) - // WatchedPaths List of currently watched file paths - WatchedPaths []string `json:"watchedPaths"` -} + // ListScrollsWithResponse request + ListScrollsWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*ListScrollsResponse, error) -// RunCommandJSONRequestBody defines body for RunCommand for application/json ContentType. -type RunCommandJSONRequestBody = StartCommandRequest + // CreateScrollWithBodyWithResponse request with any body + CreateScrollWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*CreateScrollResponse, error) -// AddPortJSONRequestBody defines body for AddPort for application/json ContentType. -type AddPortJSONRequestBody = AddPortRequest + CreateScrollWithResponse(ctx context.Context, body CreateScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*CreateScrollResponse, error) -// RunProcedureJSONRequestBody defines body for RunProcedure for application/json ContentType. 
-type RunProcedureJSONRequestBody = StartProcedureRequest + // DeleteScrollWithResponse request + DeleteScrollWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*DeleteScrollResponse, error) -// AddCommandJSONRequestBody defines body for AddCommand for application/json ContentType. -type AddCommandJSONRequestBody = CommandInstructionSet + // GetScrollWithResponse request + GetScrollWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollResponse, error) -// EnableWatchJSONRequestBody defines body for EnableWatch for application/json ContentType. -type EnableWatchJSONRequestBody = WatchModeRequest + // RunScrollCommandWithResponse request + RunScrollCommandWithResponse(ctx context.Context, id string, command string, reqEditors ...RequestEditorFn) (*RunScrollCommandResponse, error) -// AsProcedureWait0 returns the union data inside the Procedure_Wait as a ProcedureWait0 -func (t Procedure_Wait) AsProcedureWait0() (ProcedureWait0, error) { - var body ProcedureWait0 - err := json.Unmarshal(t.union, &body) - return body, err + // GetScrollPortsWithResponse request + GetScrollPortsWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollPortsResponse, error) } -// FromProcedureWait0 overwrites any union data inside the Procedure_Wait as the provided ProcedureWait0 -func (t *Procedure_Wait) FromProcedureWait0(v ProcedureWait0) error { - b, err := json.Marshal(v) - t.union = b - return err +type GetHealthAuthResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *HealthResponse + JSON503 *HealthResponse } -// MergeProcedureWait0 performs a merge with any union data inside the Procedure_Wait, using the provided ProcedureWait0 -func (t *Procedure_Wait) MergeProcedureWait0(v ProcedureWait0) error { - b, err := json.Marshal(v) - if err != nil { - return err +// Status returns HTTPResponse.Status +func (r GetHealthAuthResponse) Status() string { + if r.HTTPResponse != nil { + 
return r.HTTPResponse.Status } - - merged, err := runtime.JSONMerge(t.union, b) - t.union = merged - return err + return http.StatusText(0) } -// AsProcedureWait1 returns the union data inside the Procedure_Wait as a ProcedureWait1 -func (t Procedure_Wait) AsProcedureWait1() (ProcedureWait1, error) { - var body ProcedureWait1 - err := json.Unmarshal(t.union, &body) - return body, err +// StatusCode returns HTTPResponse.StatusCode +func (r GetHealthAuthResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 } -// FromProcedureWait1 overwrites any union data inside the Procedure_Wait as the provided ProcedureWait1 -func (t *Procedure_Wait) FromProcedureWait1(v ProcedureWait1) error { - b, err := json.Marshal(v) - t.union = b - return err +type ListScrollsResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *[]RuntimeScroll } -// MergeProcedureWait1 performs a merge with any union data inside the Procedure_Wait, using the provided ProcedureWait1 -func (t *Procedure_Wait) MergeProcedureWait1(v ProcedureWait1) error { - b, err := json.Marshal(v) - if err != nil { - return err +// Status returns HTTPResponse.Status +func (r ListScrollsResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status } - - merged, err := runtime.JSONMerge(t.union, b) - t.union = merged - return err + return http.StatusText(0) } -// AsProcedureWait2 returns the union data inside the Procedure_Wait as a ProcedureWait2 -func (t Procedure_Wait) AsProcedureWait2() (ProcedureWait2, error) { - var body ProcedureWait2 - err := json.Unmarshal(t.union, &body) - return body, err +// StatusCode returns HTTPResponse.StatusCode +func (r ListScrollsResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 } -// FromProcedureWait2 overwrites any union data inside the Procedure_Wait as the provided ProcedureWait2 -func (t *Procedure_Wait) FromProcedureWait2(v 
ProcedureWait2) error { - b, err := json.Marshal(v) - t.union = b - return err +type CreateScrollResponse struct { + Body []byte + HTTPResponse *http.Response + JSON201 *RuntimeScroll } -// MergeProcedureWait2 performs a merge with any union data inside the Procedure_Wait, using the provided ProcedureWait2 -func (t *Procedure_Wait) MergeProcedureWait2(v ProcedureWait2) error { - b, err := json.Marshal(v) - if err != nil { - return err +// Status returns HTTPResponse.Status +func (r CreateScrollResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status } + return http.StatusText(0) +} - merged, err := runtime.JSONMerge(t.union, b) - t.union = merged - return err +// StatusCode returns HTTPResponse.StatusCode +func (r CreateScrollResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 } -func (t Procedure_Wait) MarshalJSON() ([]byte, error) { - b, err := t.union.MarshalJSON() - return b, err +type DeleteScrollResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *DeletedScroll } -func (t *Procedure_Wait) UnmarshalJSON(b []byte) error { - err := t.union.UnmarshalJSON(b) - return err +// Status returns HTTPResponse.Status +func (r DeleteScrollResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) } -// ServerInterface represents all server handlers. 
-type ServerInterface interface { - // Finish cold start - // (POST /api/v1/coldstarter/finish) - FinishColdstarter(c *fiber.Ctx) error - // Run a command - // (POST /api/v1/command) - RunCommand(c *fiber.Ctx) error - // List all consoles - // (GET /api/v1/consoles) - GetConsoles(c *fiber.Ctx) error - // Stop daemon - // (POST /api/v1/daemon/stop) - StopDaemon(c *fiber.Ctx) error - // Get health status - // (GET /api/v1/health) - GetHealthAuth(c *fiber.Ctx) error - // List all log streams - // (GET /api/v1/logs) - ListAllLogs(c *fiber.Ctx) error - // List logs for a specific stream - // (GET /api/v1/logs/{stream}) - ListStreamLogs(c *fiber.Ctx, stream string) error - // Get process metrics - // (GET /api/v1/metrics) - GetMetrics(c *fiber.Ctx) error - // Get port information - // (GET /api/v1/ports) - GetPorts(c *fiber.Ctx) error - // Add a port to watch - // (POST /api/v1/ports) - AddPort(c *fiber.Ctx) error - // Remove a watched port - // (DELETE /api/v1/ports/{port}) - DeletePort(c *fiber.Ctx, port int) error - // Run a procedure - // (POST /api/v1/procedure) - RunProcedure(c *fiber.Ctx) error - // Get procedure statuses - // (GET /api/v1/procedures) - GetProcedures(c *fiber.Ctx) error - // List running processes - // (GET /api/v1/processes) - GetProcesses(c *fiber.Ctx) error - // Get process tree - // (GET /api/v1/pstree) - GetPsTree(c *fiber.Ctx) error - // Get command queue - // (GET /api/v1/queue) - GetQueue(c *fiber.Ctx) error - // Get current scroll - // (GET /api/v1/scroll) - GetScroll(c *fiber.Ctx) error - // Add command to current scroll - // (PUT /api/v1/scroll/commands/{command}) - AddCommand(c *fiber.Ctx, command string) error - // Create WebSocket token - // (GET /api/v1/token) - CreateToken(c *fiber.Ctx) error - // Disable development mode - // (POST /api/v1/watch/disable) - DisableWatch(c *fiber.Ctx) error - // Enable development mode - // (POST /api/v1/watch/enable) - EnableWatch(c *fiber.Ctx) error - // Get watch mode status - // (GET 
/api/v1/watch/status) - GetWatchStatus(c *fiber.Ctx) error +// StatusCode returns HTTPResponse.StatusCode +func (r DeleteScrollResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 } -// ServerInterfaceWrapper converts contexts to parameters. -type ServerInterfaceWrapper struct { - Handler ServerInterface +type GetScrollResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *RuntimeScroll } -type MiddlewareFunc fiber.Handler +// Status returns HTTPResponse.Status +func (r GetScrollResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} -// FinishColdstarter operation middleware -func (siw *ServerInterfaceWrapper) FinishColdstarter(c *fiber.Ctx) error { +// StatusCode returns HTTPResponse.StatusCode +func (r GetScrollResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} - c.Context().SetUserValue(BearerAuthScopes, []string{}) +type RunScrollCommandResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *RuntimeScroll +} - return siw.Handler.FinishColdstarter(c) +// Status returns HTTPResponse.Status +func (r RunScrollCommandResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) } -// RunCommand operation middleware -func (siw *ServerInterfaceWrapper) RunCommand(c *fiber.Ctx) error { +// StatusCode returns HTTPResponse.StatusCode +func (r RunScrollCommandResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} - c.Context().SetUserValue(BearerAuthScopes, []string{}) +type GetScrollPortsResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *[]RuntimePortStatus +} - return siw.Handler.RunCommand(c) +// Status returns HTTPResponse.Status +func (r GetScrollPortsResponse) Status() string { + if r.HTTPResponse != nil { + return 
r.HTTPResponse.Status + } + return http.StatusText(0) } -// GetConsoles operation middleware -func (siw *ServerInterfaceWrapper) GetConsoles(c *fiber.Ctx) error { +// StatusCode returns HTTPResponse.StatusCode +func (r GetScrollPortsResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} - c.Context().SetUserValue(BearerAuthScopes, []string{}) +// GetHealthAuthWithResponse request returning *GetHealthAuthResponse +func (c *ClientWithResponses) GetHealthAuthWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*GetHealthAuthResponse, error) { + rsp, err := c.GetHealthAuth(ctx, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetHealthAuthResponse(rsp) +} - return siw.Handler.GetConsoles(c) +// ListScrollsWithResponse request returning *ListScrollsResponse +func (c *ClientWithResponses) ListScrollsWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*ListScrollsResponse, error) { + rsp, err := c.ListScrolls(ctx, reqEditors...) + if err != nil { + return nil, err + } + return ParseListScrollsResponse(rsp) } -// StopDaemon operation middleware -func (siw *ServerInterfaceWrapper) StopDaemon(c *fiber.Ctx) error { +// CreateScrollWithBodyWithResponse request with arbitrary body returning *CreateScrollResponse +func (c *ClientWithResponses) CreateScrollWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*CreateScrollResponse, error) { + rsp, err := c.CreateScrollWithBody(ctx, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseCreateScrollResponse(rsp) +} - c.Context().SetUserValue(BearerAuthScopes, []string{}) +func (c *ClientWithResponses) CreateScrollWithResponse(ctx context.Context, body CreateScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*CreateScrollResponse, error) { + rsp, err := c.CreateScroll(ctx, body, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseCreateScrollResponse(rsp) +} - return siw.Handler.StopDaemon(c) +// DeleteScrollWithResponse request returning *DeleteScrollResponse +func (c *ClientWithResponses) DeleteScrollWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*DeleteScrollResponse, error) { + rsp, err := c.DeleteScroll(ctx, id, reqEditors...) + if err != nil { + return nil, err + } + return ParseDeleteScrollResponse(rsp) } -// GetHealthAuth operation middleware -func (siw *ServerInterfaceWrapper) GetHealthAuth(c *fiber.Ctx) error { +// GetScrollWithResponse request returning *GetScrollResponse +func (c *ClientWithResponses) GetScrollWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollResponse, error) { + rsp, err := c.GetScroll(ctx, id, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetScrollResponse(rsp) +} - c.Context().SetUserValue(BearerAuthScopes, []string{}) +// RunScrollCommandWithResponse request returning *RunScrollCommandResponse +func (c *ClientWithResponses) RunScrollCommandWithResponse(ctx context.Context, id string, command string, reqEditors ...RequestEditorFn) (*RunScrollCommandResponse, error) { + rsp, err := c.RunScrollCommand(ctx, id, command, reqEditors...) + if err != nil { + return nil, err + } + return ParseRunScrollCommandResponse(rsp) +} - return siw.Handler.GetHealthAuth(c) +// GetScrollPortsWithResponse request returning *GetScrollPortsResponse +func (c *ClientWithResponses) GetScrollPortsWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollPortsResponse, error) { + rsp, err := c.GetScrollPorts(ctx, id, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseGetScrollPortsResponse(rsp) } -// ListAllLogs operation middleware -func (siw *ServerInterfaceWrapper) ListAllLogs(c *fiber.Ctx) error { +// ParseGetHealthAuthResponse parses an HTTP response from a GetHealthAuthWithResponse call +func ParseGetHealthAuthResponse(rsp *http.Response) (*GetHealthAuthResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } - c.Context().SetUserValue(BearerAuthScopes, []string{}) + response := &GetHealthAuthResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } - return siw.Handler.ListAllLogs(c) -} + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest HealthResponse + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest -// ListStreamLogs operation middleware -func (siw *ServerInterfaceWrapper) ListStreamLogs(c *fiber.Ctx) error { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 503: + var dest HealthResponse + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON503 = &dest - var err error + } - // ------------- Path parameter "stream" ------------- - var stream string + return response, nil +} - err = runtime.BindStyledParameterWithOptions("simple", "stream", c.Params("stream"), &stream, runtime.BindStyledParameterOptions{Explode: false, Required: true}) +// ParseListScrollsResponse parses an HTTP response from a ListScrollsWithResponse call +func ParseListScrollsResponse(rsp *http.Response) (*ListScrollsResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() if err != nil { - return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter stream: %w", err).Error()) + return nil, err } - c.Context().SetUserValue(BearerAuthScopes, []string{}) - - 
return siw.Handler.ListStreamLogs(c, stream) -} + response := &ListScrollsResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } -// GetMetrics operation middleware -func (siw *ServerInterfaceWrapper) GetMetrics(c *fiber.Ctx) error { + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest []RuntimeScroll + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest - c.Context().SetUserValue(BearerAuthScopes, []string{}) + } - return siw.Handler.GetMetrics(c) + return response, nil } -// GetPorts operation middleware -func (siw *ServerInterfaceWrapper) GetPorts(c *fiber.Ctx) error { +// ParseCreateScrollResponse parses an HTTP response from a CreateScrollWithResponse call +func ParseCreateScrollResponse(rsp *http.Response) (*CreateScrollResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &CreateScrollResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 201: + var dest RuntimeScroll + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON201 = &dest - c.Context().SetUserValue(BearerAuthScopes, []string{}) + } - return siw.Handler.GetPorts(c) + return response, nil } -// AddPort operation middleware -func (siw *ServerInterfaceWrapper) AddPort(c *fiber.Ctx) error { +// ParseDeleteScrollResponse parses an HTTP response from a DeleteScrollWithResponse call +func ParseDeleteScrollResponse(rsp *http.Response) (*DeleteScrollResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } - c.Context().SetUserValue(BearerAuthScopes, []string{}) + response := &DeleteScrollResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } - return siw.Handler.AddPort(c) + 
switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest DeletedScroll + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + } + + return response, nil } -// DeletePort operation middleware -func (siw *ServerInterfaceWrapper) DeletePort(c *fiber.Ctx) error { +// ParseGetScrollResponse parses an HTTP response from a GetScrollWithResponse call +func ParseGetScrollResponse(rsp *http.Response) (*GetScrollResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } - var err error + response := &GetScrollResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } - // ------------- Path parameter "port" ------------- - var port int + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest RuntimeScroll + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest - err = runtime.BindStyledParameterWithOptions("simple", "port", c.Params("port"), &port, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + } + + return response, nil +} + +// ParseRunScrollCommandResponse parses an HTTP response from a RunScrollCommandWithResponse call +func ParseRunScrollCommandResponse(rsp *http.Response) (*RunScrollCommandResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() if err != nil { - return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter port: %w", err).Error()) + return nil, err + } + + response := &RunScrollCommandResponse{ + Body: bodyBytes, + HTTPResponse: rsp, } - c.Context().SetUserValue(BearerAuthScopes, []string{}) + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest RuntimeScroll + if err := json.Unmarshal(bodyBytes, &dest); err != nil 
{ + return nil, err + } + response.JSON200 = &dest + + } - return siw.Handler.DeletePort(c, port) + return response, nil } -// RunProcedure operation middleware -func (siw *ServerInterfaceWrapper) RunProcedure(c *fiber.Ctx) error { +// ParseGetScrollPortsResponse parses an HTTP response from a GetScrollPortsWithResponse call +func ParseGetScrollPortsResponse(rsp *http.Response) (*GetScrollPortsResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } - c.Context().SetUserValue(BearerAuthScopes, []string{}) + response := &GetScrollPortsResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } - return siw.Handler.RunProcedure(c) -} + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest []RuntimePortStatus + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest -// GetProcedures operation middleware -func (siw *ServerInterfaceWrapper) GetProcedures(c *fiber.Ctx) error { + } - c.Context().SetUserValue(BearerAuthScopes, []string{}) + return response, nil +} - return siw.Handler.GetProcedures(c) +// ServerInterface represents all server handlers. 
+type ServerInterface interface { + // Get health status + // (GET /api/v1/health) + GetHealthAuth(c *fiber.Ctx) error + // List runtime scrolls + // (GET /api/v1/scrolls) + ListScrolls(c *fiber.Ctx) error + // Create runtime scroll + // (POST /api/v1/scrolls) + CreateScroll(c *fiber.Ctx) error + // Delete runtime scroll + // (DELETE /api/v1/scrolls/{id}) + DeleteScroll(c *fiber.Ctx, id string) error + // Get runtime scroll + // (GET /api/v1/scrolls/{id}) + GetScroll(c *fiber.Ctx, id string) error + // Run runtime scroll command + // (POST /api/v1/scrolls/{id}/commands/{command}) + RunScrollCommand(c *fiber.Ctx, id string, command string) error + // Get runtime scroll port status + // (GET /api/v1/scrolls/{id}/ports) + GetScrollPorts(c *fiber.Ctx, id string) error } -// GetProcesses operation middleware -func (siw *ServerInterfaceWrapper) GetProcesses(c *fiber.Ctx) error { +// ServerInterfaceWrapper converts contexts to parameters. +type ServerInterfaceWrapper struct { + Handler ServerInterface +} + +type MiddlewareFunc fiber.Handler - c.Context().SetUserValue(BearerAuthScopes, []string{}) +// GetHealthAuth operation middleware +func (siw *ServerInterfaceWrapper) GetHealthAuth(c *fiber.Ctx) error { - return siw.Handler.GetProcesses(c) + return siw.Handler.GetHealthAuth(c) } -// GetPsTree operation middleware -func (siw *ServerInterfaceWrapper) GetPsTree(c *fiber.Ctx) error { +// ListScrolls operation middleware +func (siw *ServerInterfaceWrapper) ListScrolls(c *fiber.Ctx) error { + + return siw.Handler.ListScrolls(c) +} - c.Context().SetUserValue(BearerAuthScopes, []string{}) +// CreateScroll operation middleware +func (siw *ServerInterfaceWrapper) CreateScroll(c *fiber.Ctx) error { - return siw.Handler.GetPsTree(c) + return siw.Handler.CreateScroll(c) } -// GetQueue operation middleware -func (siw *ServerInterfaceWrapper) GetQueue(c *fiber.Ctx) error { +// DeleteScroll operation middleware +func (siw *ServerInterfaceWrapper) DeleteScroll(c *fiber.Ctx) error { + + 
var err error - c.Context().SetUserValue(BearerAuthScopes, []string{}) + // ------------- Path parameter "id" ------------- + var id string - return siw.Handler.GetQueue(c) + err = runtime.BindStyledParameterWithOptions("simple", "id", c.Params("id"), &id, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter id: %w", err).Error()) + } + + return siw.Handler.DeleteScroll(c, id) } // GetScroll operation middleware func (siw *ServerInterfaceWrapper) GetScroll(c *fiber.Ctx) error { - c.Context().SetUserValue(BearerAuthScopes, []string{}) + var err error + + // ------------- Path parameter "id" ------------- + var id string - return siw.Handler.GetScroll(c) + err = runtime.BindStyledParameterWithOptions("simple", "id", c.Params("id"), &id, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter id: %w", err).Error()) + } + + return siw.Handler.GetScroll(c, id) } -// AddCommand operation middleware -func (siw *ServerInterfaceWrapper) AddCommand(c *fiber.Ctx) error { +// RunScrollCommand operation middleware +func (siw *ServerInterfaceWrapper) RunScrollCommand(c *fiber.Ctx) error { var err error + // ------------- Path parameter "id" ------------- + var id string + + err = runtime.BindStyledParameterWithOptions("simple", "id", c.Params("id"), &id, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter id: %w", err).Error()) + } + // ------------- Path parameter "command" ------------- var command string @@ -751,41 +1122,23 @@ func (siw *ServerInterfaceWrapper) AddCommand(c *fiber.Ctx) error { return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter command: %w", err).Error()) } - 
c.Context().SetUserValue(BearerAuthScopes, []string{}) - - return siw.Handler.AddCommand(c, command) -} - -// CreateToken operation middleware -func (siw *ServerInterfaceWrapper) CreateToken(c *fiber.Ctx) error { - - c.Context().SetUserValue(BearerAuthScopes, []string{}) - - return siw.Handler.CreateToken(c) -} - -// DisableWatch operation middleware -func (siw *ServerInterfaceWrapper) DisableWatch(c *fiber.Ctx) error { - - c.Context().SetUserValue(BearerAuthScopes, []string{}) - - return siw.Handler.DisableWatch(c) + return siw.Handler.RunScrollCommand(c, id, command) } -// EnableWatch operation middleware -func (siw *ServerInterfaceWrapper) EnableWatch(c *fiber.Ctx) error { +// GetScrollPorts operation middleware +func (siw *ServerInterfaceWrapper) GetScrollPorts(c *fiber.Ctx) error { - c.Context().SetUserValue(BearerAuthScopes, []string{}) - - return siw.Handler.EnableWatch(c) -} + var err error -// GetWatchStatus operation middleware -func (siw *ServerInterfaceWrapper) GetWatchStatus(c *fiber.Ctx) error { + // ------------- Path parameter "id" ------------- + var id string - c.Context().SetUserValue(BearerAuthScopes, []string{}) + err = runtime.BindStyledParameterWithOptions("simple", "id", c.Params("id"), &id, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter id: %w", err).Error()) + } - return siw.Handler.GetWatchStatus(c) + return siw.Handler.GetScrollPorts(c, id) } // FiberServerOptions provides options for the Fiber server. 
@@ -809,126 +1162,53 @@ func RegisterHandlersWithOptions(router fiber.Router, si ServerInterface, option router.Use(fiber.Handler(m)) } - router.Post(options.BaseURL+"/api/v1/coldstarter/finish", wrapper.FinishColdstarter) - - router.Post(options.BaseURL+"/api/v1/command", wrapper.RunCommand) - - router.Get(options.BaseURL+"/api/v1/consoles", wrapper.GetConsoles) - - router.Post(options.BaseURL+"/api/v1/daemon/stop", wrapper.StopDaemon) - router.Get(options.BaseURL+"/api/v1/health", wrapper.GetHealthAuth) - router.Get(options.BaseURL+"/api/v1/logs", wrapper.ListAllLogs) - - router.Get(options.BaseURL+"/api/v1/logs/:stream", wrapper.ListStreamLogs) - - router.Get(options.BaseURL+"/api/v1/metrics", wrapper.GetMetrics) - - router.Get(options.BaseURL+"/api/v1/ports", wrapper.GetPorts) - - router.Post(options.BaseURL+"/api/v1/ports", wrapper.AddPort) - - router.Delete(options.BaseURL+"/api/v1/ports/:port", wrapper.DeletePort) - - router.Post(options.BaseURL+"/api/v1/procedure", wrapper.RunProcedure) - - router.Get(options.BaseURL+"/api/v1/procedures", wrapper.GetProcedures) - - router.Get(options.BaseURL+"/api/v1/processes", wrapper.GetProcesses) - - router.Get(options.BaseURL+"/api/v1/pstree", wrapper.GetPsTree) - - router.Get(options.BaseURL+"/api/v1/queue", wrapper.GetQueue) - - router.Get(options.BaseURL+"/api/v1/scroll", wrapper.GetScroll) + router.Get(options.BaseURL+"/api/v1/scrolls", wrapper.ListScrolls) - router.Put(options.BaseURL+"/api/v1/scroll/commands/:command", wrapper.AddCommand) + router.Post(options.BaseURL+"/api/v1/scrolls", wrapper.CreateScroll) - router.Get(options.BaseURL+"/api/v1/token", wrapper.CreateToken) + router.Delete(options.BaseURL+"/api/v1/scrolls/:id", wrapper.DeleteScroll) - router.Post(options.BaseURL+"/api/v1/watch/disable", wrapper.DisableWatch) + router.Get(options.BaseURL+"/api/v1/scrolls/:id", wrapper.GetScroll) - router.Post(options.BaseURL+"/api/v1/watch/enable", wrapper.EnableWatch) + 
router.Post(options.BaseURL+"/api/v1/scrolls/:id/commands/:command", wrapper.RunScrollCommand) - router.Get(options.BaseURL+"/api/v1/watch/status", wrapper.GetWatchStatus) + router.Get(options.BaseURL+"/api/v1/scrolls/:id/ports", wrapper.GetScrollPorts) } // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/9RcW1MkN5b+K4rcfaA3qimw2xMTvDFgt5lx2wzg5cFNEKrMU1UySilbUhZd28F/39At", - "r0d1gaa9G/MwdOl2dG76ziX9JctlWUkBwujs5Eum8yWU1P15WhSXUpkr+FSDNvaXSskKlGHgxvMl5A/3", - "NDdsxcza/lLAnNbcZCdzyjVMsgJ0rlhlmBTZSXa7BLMERYwkpRTMSEUqqQxpdphkZl1BdpLNpORARfY0", - "2OLLYMff3B+U+326Y81W2igmFnankoqCGqn2oXTJtN+7rLUhMyCyAkHmUpElUG6WxPEAJVzQEsYUW4YS", - "OzRlBQjD5gxUNsngMy0rbnco1281qBXLAbuDpSW1aV3OQJGD47d/++GH7394093170d/P7IM+MzKusxO", - "3IRJVjLh/33cnMSEgQUod5SSRuaSj4/7FcyjVA8kziAHJq+IVKQuKnessJv+kZm8yiZZXVTZXfeG/ufB", - "1Z4mmYJPNVNQ2KXunh0aAjvvmnVy9ifkxtJ5Wi9KEAacru6ipFt1bMT1ORNML+/p3IC6z2VpNQmdyIQ7", - "CO41Ezki/dslCGKW4HVqBjktgcRF2SSbS1VSk51kBTXw1rASVYL+Kfca8vFJ15BLUWjipqSOGwu9ZyMv", - "1OkR3dZ2UJ4EWwt0Mk3yWikQhq+dvaHmtdUSspcpNXYBzQGq+yUVBQdltxA153RmtdqoGrAVhipzXwCn", - "XY52CFpR5dSUGSjdH/+pYJ6dZP8xbd3yNPjk6ZnkxbXdEdR/23VPzYlUKbre1YpGeoqqVJAXZnJDOkZG", - "F/VkxI8V5TU2MiA8kOln4xQ4I7wQ2qg6tyK8BsT4C6hAFCDy8O+GyyPC+nycZAKg2HNJpWQORa1gd4Fe", - "xiXYfqpGrOWqFqSUBbg3yJpMdEet26X8ka61lZ+XrQKnhVYRQGmmDQjTYWrKA7e3wQUgtOQwZjl8Zohd", - "/viZGZJbutmc5H4tWVJN7HSw1CdMqWMqTFS1+SALxANd2KEhXzyBk5TghnuEGxE32nnEHC5x3NCWqRWv", - "F0xs51/YpiV6Axf1FehKCo2wM1zD/U2Lgnm8c9mbs9lleDY8jU4f0NschNKppPhTzjDy0o9h0glY2oqa", - "YzJQUpA4TOBzpUBrD+da/HBE/sv/byuKCG6kOW/S0Itd8kelpEpLAuwwotn2Z1KC1nSBaps21NTBNOIV", - "/F7byA8rJ2E6RvPPDoWmiS5Rcznzz2vEsP4cZz49RssHFIIqubBiQR7fMEIqUDkIQxfeHLmkBRML+5Yr", - "aufqLtSZc0mtc2rQ6fHRUQebHjUkhGe9fVWp2QSxCgql1Sb3ThUpcLXlBR9IpEwZ8q7Y87UDossd4qAU", - "mh26QzdgaVO1IG52g8/oijLPtu1x1l8aV+0fQI3DpheiyC2BD4Iq+1v+7AesIHLKOXm0Ch4BvSxBdxH9", - "voC0f9S5/ZkwQXSIHmYwlwq8DVkLdscORPMXg1nUGhtYNUaE1FDUc/kFxI6Tiq6tz3KgAzGN3wX7VANp", - 
"EBLpqdxWEbCFkAru55TxQGLSK+RSGCZqIFKQOB+zCdzNt7eCz5DX9tfo5EdEPVIMtt1SB9uExx0OUsJv", - "8+zkj/GrjgQ34wjybneHGgBXMrRArqr1Lh4AR39xgwDbdsIVbtIG4j94n/4BjGK5RrGdgNw/iCOCTp1J", - "ExEcSnfuZI+gJK9qxLdf/k5qi1c6L3XvgZR1z7e3L28JJerYP7jfw6ZMkNnagMYjcMyiIvcvzpE1Q6Ba", - "1VlDyCTrM8buvkEiNwrg12Arw6ea8UL5/MTuoVtnR4z3ZcGZwDFwXtX3gfl2fAfWLxgekHZ4OySAyftc", - "1sKAQhTsYvobaUaxZ3yzqC1oZNpYxU4uvofPWChobExekHKXjZJBRNU6CFyXmPBMtV7vQLOy4tYlFGSu", - "ZEkWstK1YfwNdmatQSXOfdqsW1dSIjBQhV/3VSdpKL/fX1H8uo7s7xXQoreYCfO3d6h5jhc/KuZx9s6r", - "g/CVl86+y/QjrZ6zblXud1xQIH9RzJ4GfsdJEbkiQgZ2o6Rc0jzHVACnfoPL25RcqOKU52YX4iu9NbvQ", - "noSR+u8aauiS+RxarnMlOf9F5g/XPm4exkrZB1oROY8ZM3Jxri3GMktgqoOQmqh7RKY/4SeGZS78mH2m", - "52xR+zCXzBm34FnVufHIrc9+WlX3K1AajeROq4qz3O8TJpEDIQ0RYDlJFeNroqFcgUKdWLjlCxJHWIYV", - "83+5Tw7tgfxDNgl5sSwPkszdEtcyESFspSCnNugPEHyYLGSGUc7+xzM3qsNBu25Cag1Eg1oBytsHgOqU", - "sxVcXn4YU/svgIpQO0wqmj+AcQkRUjJRG7zqgmPacOsu2Ow9gC4RuVG+qd9TqLGV6egHG33tkde2sRoi", - "XsdSrFilVt1M9rhskLKSwKPGQFqDaMPu48OjwyMU0ycsvONDxgf6XJmcE9poDvMJp0/Wi3Uyx6oWwp40", - "yQopoEnj+VjLDtwhN40ULK6NAlqOffYDIJjsZgmEy4X1NUBLsjkA4nKBuBsrInstu41FrfuEGQNnb0n0", - "x2DO3sX8wbukWwtSaambtuRBLs6t//aeu5+5jAWPcfpjLfK9GhVULYhdtFRSyFrzNTmw4nOpKqv4HOya", - "N0hUPsqvpzPPjiVNtJ5kCp67uFn2sxZNBaTJT2CMGFbG+nv+wrSxytDMWvvX0qXt2lyJG9XEuePdQ1I8", - "WXHTpdiXcg7gcHE4IR8zbQomPmYTIhXxbs9NeDOQecFEImGNRwrNkSFxEOXdYx6hC8qEfiVt2q43IWHj", - "ZN/eBdOiG/kAIg32jB3GmbAAAco+fITWZml9RwAefs3WUpebhZF0S02+/CCLtE4vpbkCq7ZnHbyCZqJ1", - "ZKBLgFpopUm+pMKlLnbXvkdL0iU1S+Skc6YgN1IxcIe5qc93g52TtvAmWXISdMahSBdw3BHeWCxsxZOD", - "OvGUXYGuuYnVHzl3et/UaPq2Veeh/LlrxSpQnry4f0efcfVoTZ2r9xpG4kKME24NFAnpR6fX7hXmexxf", - "uVXP1oaWrh4Vd1jkpCGvFTPrawunPD9mQBWo09os23/9FEPdf97eZEOv88/bG2++PuHhIYFZW8e2YoVD", - "Bg6tORa57VqWLY2pfKD8ACKeOcBBS6nMWwtyCwt81DoeJhW5hdm1tMB3mLW0C93kmLw/yQYehlbsX2CZ", - "5/D8XIY0qaG59R2jiO5c1awgZ79cuKJU48wt8nJVJmoIp7XIl6CJRQwlFXQBmqyoYrLWRFtUS+T8o6Bt", - "uKUnhLMHIAtaggOsSk/cCzujGrR9gx5hFgYOPzrimXFm0pCTdSBrdnR4fHgU+59oxbKT7Hv30ySroiZO", - 
"acWmq+NpLnnhK5Zq6ut0zjSkRpLz12whKPe3tOt8maZhgrUKD1DA9/J4u74ospPsJ7f1WXuYbxNxtujo", - "+e7oOywlnUNlw6qujrpiQFc7/7h7uptkui5LqtbNWR0KLcPoQntQ1KXAV2wzVyRoGdIAQZwLP3r414Hj", - "Tt+tL9NITD5ixVUtzprAQ/ln6h+yWEfNC3m3jn5M/9Q+FPHRzta8BIJ3n/rewUapTyMJHKWLslGwxQBN", - "PE2y746O0+tCMZzQ0bJ32HH/oAVRkeRJ9gM250IYUFYPvUUQH+TspSFXtWjF19EOL7+kYrSNMQtAFOM9", - "GEI5b2vVTd9R3yv11eE9mNiUk+ES+SpKMWr8cRqBpqtG9Ov9uOueNertIN4rctg6MuepE0z2v021kVXa", - "At8rmsO85nxN7MRO88WIu9dGVudxaMDcYyzYlhVhLl2zt9txaxsy4oWbH7Db+taDjQoVmkp6jTOYDvm+", - "HEfZK2rRoPsH0SE/Y+2t9/tvePC172EntWh7RfaSn+X2kM1RikFQuBS5XOzjFNq0iSaPzCxDPjiyaShb", - "a02nLkHzYv+wUyZtmA8aw8wR63upnHC1Z7qM7g4t+7m/fIr50y9+ydNGKdiZDi1SoivI2ZzlaPpqzH7P", - "iSCBiipagq9w/jH2H+P9HAi1uKvFoP7YbPgiTzqyGoY9d69o1SORj0X8S5vri0e6N/wd5kPdNCENmcta", - "FM/QhLSodlOKsm2ASKpDmOOP4Tz2w0FB2tIR4mdjb8ULxfGC2tegyQOphI2kFyvU8c4lrfb3jVV/k44k", - "2l8wYTSJ/KQouoVzOpN1gx4cirYScVsg0rgMA6/vFvsf/ezjFH0LY3vFZ3B+uEPL+tCj1vB9koBMp0VB", - "KBHw6Dczksygo/Kzdfs1TPgYjBww8TZ0TEjB129G/A9f671SFDP4FnCnAOb4653eFzdiUq5ttihsRORz", - "VQ6QdiKbr0JHv0UcoeNCrChnRWiU7AWee+mZV5CoHDERmVKzoXlPv9j/e/J65xIAOLR2+zKxiEdt1bFz", - "t1tQs41P703U3/BZopFEQSlXgD/BVau5+AO8z7eLyOv8LtHz62nClObdt1Ma3338vAf6yl2A0CZXGTi5", - "i6r0umS3JFe0oaKgXIp+XWmUS7nsDL5aNmVUKts9n7IzEbu94q5CtG82pl35knzM/hmWXkFwlxxL/4u2", - "JGBwuba2Ls45CaVv0tkAgwvd0VfBb9v6GpJJl6orIlPrfZMuDULr74Hwvf2yLCmB2KG1MaJtqxY95qeA", - "82Vn8NUimXELWprjY6r3D1SwmzeOcDOfbTwDW7U8gm47uQlVduO3a+38C+OUpj11nwjF3fNF4Ynxt94t", - "NvEdNNuEEDS9yfe3rXuxA2fE/X+HgVfT9H4HI8JRN6H50u45HI3XjZeMLI3/xhgavEyKo1dgaiV0l6t8", - "7T7Xs2/ZLtWT92Cuoyd75YSI67tEOBtr4YHeOXtOpjHvbbLr++gHY41KT7+Evxzyrmo88NOEEgNlJala", - "N0I1ckCCa0Gc19yO0KLwK5RdMq9F7n0AM2vfI2Pn+r6fAlbApWs3lAUQakihalYcLhZowNhWvjaC+Vg+", - "+jX+1wNG+D3vldD2yKF9fYSYaFzdOWBNVNwU7F+DsFFcUsK7KlnTt5Nwir51x0L0TkkeK8b3e3tG6nDm", - "bngT6vGvZsz9LiXEnt83zUj+5ntx3F+ic+mmv2DHipcLo6YF0/4TvmTt3YbPri0kxtDjWNnvcRti91dj", - "6LiPCGHqbdstE+5WdCq63yo3EirFzygRB2ZG/1ZaM4qfMkbJBkanpepbcDYJlSrTl2rXqZZYPehH0Rfy", - 
"1/dnox66p+DL/o/oU2xs+taptkFzwrvj774tC065AlqsSfj2+v+TNXmlfZkxtV2F6Lt0toT8gbD5oE8v", - "KItrxnpcUuO76ghVQGZg7S2ksTCg2ekbfHV/OmhPTFpA4MLeeHPQuNmrbo95H7+cQMFZ2wN3enmRTbJa", - "8ewkm2b20LBl4qsJ3w3nxB+aivrxVKdC6gHK0+RLshDp0sjCmqRRDFaUt6tdaXC8dlgGcw16vgbi39Ow", - "vGxKa8kdmot0MsrNp2kpSOcCJ3StD6nGK9HGRuLR3QO4bqKwQwszEKoH9SMSOrO8FnQT4uO1P3f+6xga", - "XRh6I7BrNy2Cvf8QS4Pg21a88WLftEM4m0O+zjnOtqCw49U/9Z5USzXieiLnnPaP9zgVQprOZ3U0diSH", - "dbQZ19nT3dP/BgAA//8Nu8Jt11IAAA==", + "H4sIAAAAAAAC/9RYX2/bOBL/KgTvHhXbufbuwW+5BO2l18MFSRd5aIOAJsc2a4pUhyMn3sDffUFSkiVL", + "TtbbdLF9MWRx/v5mOH/0xKXLC2fBkufTJ+7lEnIRH88RBMGNRGfMNXwrwVN4XaArAElDJBJIei5kPFHg", + "JeqCtLN8yv9/fsnqU4YwBwQrgTlkxklhmI+CWSFoyTMOjyIvDPBpI9GPFJZajRaLMYGn+DMNPzzjtCkC", + "qSfUdsG3GVeCxD06N2RHfBCGKQG5sydJe1AbbJkJuQKrgoFMOktCW20XDEtLOgcW5DKlESQ53IzY5Zy5", + "XBOByphguSBALYz+tc1Ti8zFhhWlMYyW0CAxGrJeq77ZF1AgSEGgmDBaeDZ3yKzIYcQaj5Irtd4KUK3G", + "kawD6lc380OaA+UzkD2roANG8DEBzBSgXoNnmtgcXV6xjTYiN+z3W5a4XjOmbTuEVbU7vgDJ5tqA/zHh", + "3WYc4VupERSfft5dmLuG0s2+gqTg9AUYIFDpzvUvW8qTPlQkqIwEO1hVkvSiOTqQVAKGLPoPCEPLa/CF", + "sx76JuVODSTQeYkIltgycrMkn0XaduzdaijyBboFgvd9sVfVCSsAJVgSC4jXwjihQoiCYSLQhoyaO8wF", + "8SmfGydC0cjFo87LnE9PJ5OM59qmf5PGBFvmM8AKUaR7JWjAt9sl2Ha6R9qIdKMxMJ6EZOEZt6UxYhbc", + "JSzhpXBEiIbicJ2y78oh3TTh7oZi5krbTpCZcwaEDdxL5+leF4PZE88Kh9Q61ZZgkaBYARRnRq/hE4r5", + "XMtBGUZ4uheS9FrT5l5EUYNoHCxA/Sw4aFGBToIq8QAfOnLSmcFDfLyfbSjB1dinLf3r7c62libvSpTD", + "aqiHRgvu6vAoXTWPWz0v80Fb5R6GbTrGu73Ei3FoY1tFoAVoVmXYzvkGoWcy9lAtaw8OPU+ky3NhVSJU", + "Sqdqf9US0LlMO6UyzizqqBTszA4HenPvtXuwgPeHCnLqXQdze6+3PVfQbShRn2u/eMaxtKGbxartiiK+", + "A0SHPGuK/t2Al2WhjkRmqFM0Ues60Qax637jTCc2HXP6yRMgAFmips1NmEer+gYCAc9KWu7+vav9+HD7", + "KQLQrtQfbj8xciuwaQ7RCixp2rAC3VorwGhqEB9uWhS3A2FJVMQ7FfhrnV3xN0uHdBJKo2LfSsBNrcwh", + "u4XZjZMroDCAWJB1U9KBMRLzuvYlFTvNotD/hQ3fBhS0nbugOE4x8a5s9528CEMyO/94yYworVyCj8NN", + "LqxYgK/nH8CTOLc0U48oCqNlapYZM3oFX+xChCEPcA3oszj6zoQHn0WBDzCrz0ZformaYhNvDOAZD6fJ", + 
"rMnodDSJF6UAKwrNp/xNfJXxMKbFgI5Focfr03GaEsKbBQwMe++B6l7bmSd4FJ4a/qVKhGlcifEK2Zum", + "lqjsH5NJjSTYqKUFwfirD6rq9Sc8/R1hzqf8b+PdfjSulqPx3lAUQ9W1OVFsAgD/nLz5ExXfAK61BFZa", + "sRY6TR7xPpV5LnBTwbmPI4mFD3e8ikS4zgFvfhdY6zClzPGtOHXh/6g93VQ03wm+Jsj9S2B0G8x2d4EQ", + "xWYIm+vOHuP3cAnm7606bWiqkzY2oUX6ASDaizNPRRQ8/dupzaslwtBuvu1W7NAht704nL6aCXvwvwQ3", + "q1tYF/XkyB7uz8PeT8nxk1bbVDhCA+xHJO1VTUQKgSIHAgwqnlJRrr5DVDU5NrsullkLl/1OefcDi013", + "J3wZ53oI2Gb87eRtv5zukVtHbB7num5gktqjApMN14X3QD8n8kdm+PciHgrzK9yDcT08j5+qp3g1hkvV", + "dWmTe+eJ9EeEKBsUIhuFP0mwf0kT636IvjPo16Xd/762Q+YPBD8sbYdbdHMVryLZX/A+HtP5Wx9Dju7+", + "LABVD0CvfnE70p+PY9x14lgdI3BouD+7CvN1iYZP+ZgHmCuhva0kGZDm/xwsxem9yikGjyDLSNkEuMnj", + "fUkf3YJ5QhC5tosoBYFQw1qYHbdxCz/AW5WTsBeV0DJmxxhPBjgHN6eoPe5JfifhAWY+Ug5ICanBwvYU", + "1kPtbPrgW8ejEhA/cPR505DN5BLkyg8yVmNyn/V/pSF9UuVAnRJD3teZ0BdxkTYdo+cgN9IMs1fp0+d+", + "pw2wB0FyWcdMwRqMK2ImVB9ga/wC2YCMM2sdJdTmQZyQEnzLe9Gce7692/4WAAD//++SYFRCGgAA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/internal/api/generated_test.go b/internal/api/generated_test.go new file mode 100644 index 00000000..be487065 --- /dev/null +++ b/internal/api/generated_test.go @@ -0,0 +1,12 @@ +package api + +import ( + "reflect" + "testing" +) + +func TestCreateScrollRequestHasNoRuntime(t *testing.T) { + if _, ok := reflect.TypeOf(CreateScrollRequest{}).FieldByName("Runtime"); ok { + t.Fatal("CreateScrollRequest should not expose runtime") + } +} diff --git a/internal/core/domain/command_status.go b/internal/core/domain/command_status.go new file mode 100644 index 00000000..a3f6ad07 --- /dev/null +++ b/internal/core/domain/command_status.go @@ -0,0 +1,33 @@ +package domain + +type ScrollLockStatus string + +const ( + ScrollLockStatusRunning ScrollLockStatus = "running" + ScrollLockStatusDone ScrollLockStatus = "done" + ScrollLockStatusError ScrollLockStatus = "error" + ScrollLockStatusWaiting ScrollLockStatus = "waiting" +) + +type LockStatus struct { + Status 
ScrollLockStatus `json:"status"` + ExitCode *int `json:"exit_code"` + LastStatusChange int64 `json:"last_status_change"` +} + +type CommandExecutionError struct { + Command string + ExitCode int + Err error +} + +func (e *CommandExecutionError) Error() string { + if e.Err == nil { + return "command failed" + } + return e.Err.Error() +} + +func (e *CommandExecutionError) Unwrap() error { + return e.Err +} diff --git a/internal/core/domain/console.go b/internal/core/domain/console.go index 2855bb8e..c2d9ff95 100644 --- a/internal/core/domain/console.go +++ b/internal/core/domain/console.go @@ -3,14 +3,15 @@ package domain type ConsoleType string const ( - ConsoleTypeTTY ConsoleType = "tty" - ConsoleTypeProcess ConsoleType = "process" - ConsoleTypePlugin ConsoleType = "plugin" + ConsoleTypeTTY ConsoleType = "tty" + ConsoleTypeContainer ConsoleType = "container" ) type Console struct { Channel *BroadcastChannel `json:"-" validate:"required"` + WriteInput func(data string) error `json:"-"` + Type ConsoleType `json:"type" validate:"required"` InputMode string `json:"inputMode" validate:"required"` diff --git a/internal/core/domain/log.go b/internal/core/domain/log.go new file mode 100644 index 00000000..43536dc3 --- /dev/null +++ b/internal/core/domain/log.go @@ -0,0 +1,10 @@ +package domain + +import "container/list" + +type Log struct { + List *list.List + Capacity uint + Req chan chan<- []byte + Write chan<- []byte +} diff --git a/internal/core/domain/process.go b/internal/core/domain/process.go deleted file mode 100644 index c01d08b8..00000000 --- a/internal/core/domain/process.go +++ /dev/null @@ -1,90 +0,0 @@ -package domain - -import ( - "container/list" - "errors" - "io" - "os" - "os/exec" - "syscall" - - processutil "github.com/shirou/gopsutil/process" -) - -type Process struct { - Cmd *exec.Cmd `json:"-"` - Name string `json:"name"` - Type string `json:"type"` - StdIn io.WriteCloser `json:"-"` -} - -type Log struct { - List *list.List - Capacity uint - Req chan 
chan<- []byte - Write chan<- []byte -} - -type ProcessMonitorMetrics struct { - Cpu float64 - Memory int - Connections []string - Pid int -} // @name ProcessMonitorMetrics - -func (process *Process) Stop() error { - if process.Cmd == nil { - return nil - } - //TODO: stop process - return process.Cmd.Process.Signal(syscall.SIGKILL) -} - -func (process *Process) Kill() error { - if process.Cmd == nil { - return nil - } - return process.Cmd.Process.Kill() -} - -func (process *Process) Status() *os.Process { - return process.Cmd.Process -} - -type ProcessTreeRoot struct { - Root *ProcessTreeNode `json:"root"` - TotalMemoryRss uint64 `json:"total_memory_rss"` - TotalMemoryVms uint64 `json:"total_memory_vms"` - TotalMemorySwap uint64 `json:"total_memory_swap"` - TotalIoCountersRead uint64 `json:"total_io_counters_read"` - TotalIoCountersWrite uint64 `json:"total_io_counters_write"` - TotalCpuPercent float64 `json:"total_cpu_percent"` - TotalProcessCount uint `json:"total_process_count"` -} // @name ProcessTreeRoot - -type ProcessTreeNode struct { - Process *processutil.Process `json:"process"` - Memory *processutil.MemoryInfoStat `json:"memory"` - MemoryEx *processutil.MemoryInfoExStat `json:"memory_ex"` - IOCounters *processutil.IOCountersStat `json:"io_counters"` - CpuPercent float64 `json:"cpu_percent"` - Name string `json:"name"` - Gids []int32 `json:"gids"` - Username string `json:"username"` - Cmdline string `json:"cmdline"` - Children []*ProcessTreeNode `json:"children"` -} // @name ProcessTreeNode - -func (process *Process) GetProcess() (*processutil.Process, error) { - - var status = process.Cmd.Process - if status == nil || status.Pid < 0 { - return nil, errors.New("process not initialized") - } - exists, _ := processutil.PidExists(int32(status.Pid)) - if !exists { - process.Stop() - return nil, errors.New("process not running") - } - return processutil.NewProcess(int32(status.Pid)) -} diff --git a/internal/core/domain/queue_item.go 
b/internal/core/domain/queue_item.go index fadf3234..8fcdea92 100644 --- a/internal/core/domain/queue_item.go +++ b/internal/core/domain/queue_item.go @@ -4,7 +4,6 @@ type QueueItem struct { Name string Status ScrollLockStatus Error error - UpdateLockStatus bool RunAfterExecution func() DoneChan chan struct{} RestartCount uint diff --git a/internal/core/domain/runtime.go b/internal/core/domain/runtime.go new file mode 100644 index 00000000..fc886188 --- /dev/null +++ b/internal/core/domain/runtime.go @@ -0,0 +1,40 @@ +package domain + +const DefaultExecImage = "bash:latest" + +const RuntimeDataDir = "data" +const RuntimeConfigDir = ".druid" +const RuntimeConfigFile = "runtime.json" + +type RuntimeConfig struct { + SchemaVersion string `json:"schemaVersion"` + Scroll RuntimeConfigScroll `json:"scroll"` + Paths RuntimeConfigPaths `json:"paths"` + Ports []Port `json:"ports"` + ExpectedPorts []RuntimeExpectedPort `json:"expectedPorts,omitempty"` + Runtime RuntimeConfigRuntime `json:"runtime"` +} + +type RuntimeConfigScroll struct { + ID string `json:"id"` + Name string `json:"name"` + Artifact string `json:"artifact"` +} + +type RuntimeConfigPaths struct { + Data string `json:"data"` + RuntimeConfig string `json:"runtimeConfig"` +} + +type RuntimeExpectedPort struct { + Name string `json:"name"` + Procedure string `json:"procedure"` + Port int `json:"port"` + Protocol string `json:"protocol"` + KeepAliveTraffic string `json:"keepAliveTraffic,omitempty"` +} + +type RuntimeConfigRuntime struct { + Backend string `json:"backend"` + GeneratedAt string `json:"generatedAt"` +} diff --git a/internal/core/domain/runtime_scroll.go b/internal/core/domain/runtime_scroll.go new file mode 100644 index 00000000..b2154143 --- /dev/null +++ b/internal/core/domain/runtime_scroll.go @@ -0,0 +1,31 @@ +package domain + +import "time" + +type RuntimeScrollStatus string + +const ( + RuntimeScrollStatusCreated RuntimeScrollStatus = "created" + RuntimeScrollStatusRunning RuntimeScrollStatus = 
"running" + RuntimeScrollStatusStopped RuntimeScrollStatus = "stopped" + RuntimeScrollStatusError RuntimeScrollStatus = "error" + RuntimeScrollStatusDeleted RuntimeScrollStatus = "deleted" +) + +type RuntimeScroll struct { + ID string `json:"id"` + OwnerID string `json:"owner_id,omitempty"` + Artifact string `json:"artifact"` + ScrollRoot string `json:"scroll_root"` + DataRoot string `json:"data_root"` + ScrollName string `json:"scroll_name"` + ScrollYAML string `json:"-"` + Status RuntimeScrollStatus `json:"status"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Commands map[string]LockStatus `json:"commands,omitempty"` +} + +type RuntimeState struct { + Scrolls map[string]*RuntimeScroll `json:"scrolls"` +} diff --git a/internal/core/domain/scroll.go b/internal/core/domain/scroll.go index 9b89a87d..5c56a0f6 100644 --- a/internal/core/domain/scroll.go +++ b/internal/core/domain/scroll.go @@ -4,6 +4,10 @@ import ( "fmt" "io" "os" + "path/filepath" + "regexp" + "strconv" + "strings" "time" semver "github.com/Masterminds/semver/v3" @@ -46,7 +50,6 @@ type Port struct { Vars []ColdStarterVars `yaml:"vars" json:"vars"` StartDelay uint `yaml:"start_delay" json:"start_delay"` FinishAfterCommand string `yaml:"finish_after_command" json:"finish_after_command"` - CheckActivity bool `yaml:"check_activity" json:"check_activity"` Description string `yaml:"description,omitempty" json:"description,omitempty"` } @@ -58,19 +61,16 @@ type AugmentedPort struct { } type File struct { - Name string `yaml:"name" json:"name"` - Desc string `yaml:"desc" json:"desc"` - PullChannel map[string]string `yaml:"pull_channel" json:"pull_channel"` - Version *semver.Version `yaml:"version" json:"version"` - AppVersion string `yaml:"app_version" json:"app_version"` //don't make this a semver, it's not allways - Init string `yaml:"init" json:"init"` - Serve string `yaml:"serve" json:"serve"` - Ports []Port `yaml:"ports" json:"ports"` - KeepAlivePPM uint 
`yaml:"keepAlivePPM" json:"keepAlivePPM"` - Commands map[string]*CommandInstructionSet `yaml:"commands" json:"commands"` - Plugins map[string]map[string]string `yaml:"plugins" json:"plugins"` - Cronjobs []*Cronjob `yaml:"cronjobs" json:"cronjobs"` - Chunks []*Chunks `yaml:"chunks" json:"chunks"` + Name string `yaml:"name" json:"name"` + Desc string `yaml:"desc" json:"desc"` + PullChannel map[string]string `yaml:"pull_channel" json:"pull_channel"` + Version *semver.Version `yaml:"version" json:"version"` + AppVersion string `yaml:"app_version" json:"app_version"` //don't make this a semver, it's not allways + Serve string `yaml:"serve" json:"serve"` + Ports []Port `yaml:"ports" json:"ports"` + Commands map[string]*CommandInstructionSet `yaml:"commands" json:"commands"` + Cronjobs []*Cronjob `yaml:"cronjobs" json:"cronjobs"` + Chunks []*Chunks `yaml:"chunks" json:"chunks"` } type Scroll struct { @@ -78,19 +78,125 @@ type Scroll struct { scrollDir string } +type ProcedureType string + +const ( + ProcedureTypeContainer ProcedureType = "container" + ProcedureTypeSignal ProcedureType = "signal" +) + type Procedure struct { - Mode string `yaml:"mode" json:"mode"` - Id *string `yaml:"id" json:"id"` - Wait interface{} `yaml:"wait" json:"wait"` - Data interface{} `yaml:"data" json:"data"` - IgnoreFailure bool `yaml:"ignore_failure" json:"ignore_failure"` + Type ProcedureType `yaml:"type,omitempty" json:"type,omitempty"` + Id *string `yaml:"id,omitempty" json:"id,omitempty"` + IgnoreFailure bool `yaml:"ignore_failure" json:"ignore_failure"` + Image string `yaml:"image,omitempty" json:"image,omitempty"` + Command []string `yaml:"command,omitempty" json:"command,omitempty"` + WorkingDir string `yaml:"working_dir,omitempty" json:"working_dir,omitempty"` + Env map[string]string `yaml:"env,omitempty" json:"env,omitempty"` + ExpectedPorts []ExpectedPort `yaml:"expectedPorts,omitempty" json:"expectedPorts,omitempty"` + Mounts []Mount `yaml:"mounts,omitempty" json:"mounts,omitempty"` 
+ Target string `yaml:"target,omitempty" json:"target,omitempty"` + Signal string `yaml:"signal,omitempty" json:"signal,omitempty"` + TTY bool `yaml:"tty,omitempty" json:"tty,omitempty"` + + Mode string `yaml:"mode,omitempty" json:"-"` + Wait interface{} `yaml:"wait,omitempty" json:"-"` + Data interface{} `yaml:"data,omitempty" json:"-"` +} + +func (p *Procedure) Kind() ProcedureType { + if p.Type == "" { + return ProcedureTypeContainer + } + return p.Type +} + +func (p *Procedure) IsContainer() bool { + return p.Kind() == ProcedureTypeContainer +} + +func (p *Procedure) IsSignal() bool { + return p.Kind() == ProcedureTypeSignal +} + +func (p *Procedure) hasContainerFields() bool { + return p.Image != "" || + len(p.Command) > 0 || + p.WorkingDir != "" || + len(p.Env) > 0 || + len(p.ExpectedPorts) > 0 || + len(p.Mounts) > 0 || + p.TTY +} + +type Mount struct { + Path string `yaml:"path" json:"path"` + SubPath string `yaml:"sub_path,omitempty" json:"sub_path,omitempty"` + ReadOnly bool `yaml:"read_only,omitempty" json:"read_only,omitempty"` +} + +type ExpectedPort struct { + Name string `yaml:"name" json:"name"` + KeepAliveTraffic string `yaml:"keepAliveTraffic,omitempty" json:"keepAliveTraffic,omitempty"` +} + +type TrafficThreshold struct { + Bytes uint64 + Window time.Duration +} + +type RuntimePortStatus struct { + Name string `json:"name"` + Procedure string `json:"procedure"` + Port int `json:"port"` + Protocol string `json:"protocol"` + Bound bool `json:"bound"` + HostIP string `json:"host_ip,omitempty"` + HostPort int `json:"host_port,omitempty"` + Traffic bool `json:"traffic"` + TrafficBytes *uint64 `json:"traffic_bytes,omitempty"` + RXBytes *uint64 `json:"rx_bytes,omitempty"` + TXBytes *uint64 `json:"tx_bytes,omitempty"` + KeepAliveTraffic string `json:"keepAliveTraffic,omitempty"` + TrafficWindow string `json:"traffic_window,omitempty"` + TrafficOK *bool `json:"traffic_ok,omitempty"` + LastActivityAt *time.Time `json:"last_activity_at,omitempty"` + Source 
string `json:"source"` +} + +var trafficThresholdPattern = regexp.MustCompile(`(?i)^([0-9]+)(b|kb|mb|gb)/(.+)$`) + +func ParseKeepAliveTraffic(value string) (*TrafficThreshold, error) { + if value == "" { + return nil, nil + } + matches := trafficThresholdPattern.FindStringSubmatch(strings.TrimSpace(value)) + if len(matches) != 4 { + return nil, fmt.Errorf("invalid keepAliveTraffic %q, expected format like 10kb/5m", value) + } + amount, err := strconv.ParseUint(matches[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid keepAliveTraffic amount %q: %w", matches[1], err) + } + switch strings.ToLower(matches[2]) { + case "kb": + amount *= 1000 + case "mb": + amount *= 1000 * 1000 + case "gb": + amount *= 1000 * 1000 * 1000 + } + window, err := time.ParseDuration(matches[3]) + if err != nil || window <= 0 { + return nil, fmt.Errorf("invalid keepAliveTraffic window %q", matches[3]) + } + return &TrafficThreshold{Bytes: amount, Window: window}, nil } type CommandInstructionSet struct { - Dependencies []string `yaml:"dependencies,omitempty" json:"dependencies,omitempty"` - Procedures []*Procedure `yaml:"procedures" json:"procedures"` - Needs []string `yaml:"needs,omitempty" json:"needs,omitempty"` - Run RunMode `yaml:"run,omitempty" json:"run,omitempty"` + Procedures []*Procedure `yaml:"procedures" json:"procedures"` + Needs []string `yaml:"needs,omitempty" json:"needs,omitempty"` + Run RunMode `yaml:"run,omitempty" json:"run,omitempty"` } var ErrScrollDoesNotExist = fmt.Errorf("scroll does not exist") @@ -122,6 +228,16 @@ func NewScroll(scrollDir string) (*Scroll, error) { return &scroll, nil } +func NewScrollFromBytes(scrollDir string, file []byte) (*Scroll, error) { + scroll := Scroll{ + scrollDir: scrollDir, + } + if _, err := scroll.ParseFile(file); err != nil { + return nil, err + } + return &scroll, nil +} + func (sc *Scroll) ParseFile(file []byte) (*Scroll, error) { valueReplacedScroll := os.ExpandEnv(string(file)) @@ -132,19 +248,9 @@ func (sc 
*Scroll) ParseFile(file []byte) (*Scroll, error) { } sc.File = f - sc.migrateInitToServe() return sc, nil } -func (sc *Scroll) migrateInitToServe() { - if sc.Serve == "" && sc.Init != "" { - logger.Log().Warn("scroll.init is deprecated, use scroll.serve instead") - sc.Serve = sc.Init - } else if sc.Serve != "" && sc.Init != "" { - logger.Log().Warn("both scroll.init and scroll.serve are set, scroll.init will be ignored") - } -} - func (sc *Scroll) Validate(strict bool) error { if sc.Name == "" { return fmt.Errorf("scroll name is required") @@ -158,14 +264,22 @@ func (sc *Scroll) Validate(strict bool) error { if sc.AppVersion == "" { return fmt.Errorf("scroll app_version is required") } - if sc.Serve == "" { - return fmt.Errorf("scroll serve is required") - } if len(sc.Commands) == 0 { return fmt.Errorf("scroll commands are required") } + if sc.Serve != "" { + if _, ok := sc.Commands[sc.Serve]; !ok { + return fmt.Errorf("scroll serve command %s is not defined", sc.Serve) + } + } ids := make(map[string]bool) + portsByName := make(map[string]bool, len(sc.Ports)) + for _, port := range sc.Ports { + if port.Name != "" { + portsByName[port.Name] = true + } + } for cmd, cis := range sc.Commands { if cmd == "" { return fmt.Errorf("command name is required") @@ -177,8 +291,72 @@ func (sc *Scroll) Validate(strict bool) error { return fmt.Errorf("command procedures are required") } for _, p := range cis.Procedures { - if p.Mode == "" { - return fmt.Errorf("procedure mode is required") + if p == nil { + return fmt.Errorf("procedure is required") + } + if p.Mode != "" { + return fmt.Errorf("procedure uses legacy mode %q; use type: container or type: signal", p.Mode) + } + if p.Wait != nil { + return fmt.Errorf("procedure uses legacy wait; waits are no longer supported") + } + if p.Data != nil { + return fmt.Errorf("procedure uses legacy data; use container command fields or type: signal") + } + switch p.Kind() { + case ProcedureTypeContainer: + if p.Image == "" { + return 
fmt.Errorf("container procedure image is required") + } + if p.Target != "" || p.Signal != "" { + return fmt.Errorf("container procedure cannot set target or signal; use type: signal") + } + mountPaths := map[string]bool{} + for _, mount := range p.Mounts { + if mount.Path == "" { + return fmt.Errorf("mount path is required") + } + if !filepath.IsAbs(mount.Path) { + return fmt.Errorf("mount path %s must be absolute", mount.Path) + } + if mountPaths[mount.Path] { + return fmt.Errorf("mount path %s is duplicated", mount.Path) + } + mountPaths[mount.Path] = true + if mount.SubPath == "" { + continue + } + if filepath.IsAbs(mount.SubPath) { + return fmt.Errorf("mount sub_path %s must be relative", mount.SubPath) + } + clean := filepath.Clean(mount.SubPath) + if clean == ".." || strings.HasPrefix(clean, "../") { + return fmt.Errorf("mount sub_path %s escapes data root", mount.SubPath) + } + } + for _, expectedPort := range p.ExpectedPorts { + if expectedPort.Name == "" { + return fmt.Errorf("expected port name is required") + } + if !portsByName[expectedPort.Name] { + return fmt.Errorf("expected port %s is not defined in top-level ports", expectedPort.Name) + } + if _, err := ParseKeepAliveTraffic(expectedPort.KeepAliveTraffic); err != nil { + return err + } + } + case ProcedureTypeSignal: + if p.Target == "" { + return fmt.Errorf("signal procedure target is required") + } + if p.Signal == "" { + return fmt.Errorf("signal procedure signal is required") + } + if p.hasContainerFields() { + return fmt.Errorf("signal procedure cannot set container fields") + } + default: + return fmt.Errorf("unsupported procedure type %q", p.Type) } if p.Id == nil { continue @@ -190,8 +368,14 @@ func (sc *Scroll) Validate(strict bool) error { } } //scan for files in sc.scrollDir + if sc.scrollDir == "" { + return nil + } entries, err := os.ReadDir(sc.scrollDir) if err != nil { + if !strict && os.IsNotExist(err) { + return nil + } return fmt.Errorf("failed to read scroll directory - %w", 
err) } for _, entry := range entries { @@ -222,8 +406,6 @@ func (sc *Scroll) GetColdStartPorts() []Port { return sc.Ports } -const ScrollConfigFile = "scroll-config.yml" -const ScrollConfigTemplate = ScrollConfigFile + ".scroll_template" const ScrollDataDir = "data" // DataLoadedMarkerFile is created under the scroll data directory after a successful @@ -231,13 +413,9 @@ const ScrollDataDir = "data" const DataLoadedMarkerFile = ".data-loaded" var ScrollFiles = map[string]ArtifactType{ - "update": ArtifactTypeScrollFs, - "scroll.yaml": ArtifactTypeScrollFs, - "packet_handler": ArtifactTypeScrollFs, - "public": ArtifactTypeScrollFs, - "private": ArtifactTypeScrollFs, - "scroll-config.yml.scroll_template": ArtifactTypeScrollFs, - "data": ArtifactTypeScrollData, - "scroll-lock.json": ArtifactTypeScrollData, - "scroll-config.yml": ArtifactTypeScrollData, + "update": ArtifactTypeScrollFs, + "scroll.yaml": ArtifactTypeScrollFs, + "public": ArtifactTypeScrollFs, + "private": ArtifactTypeScrollFs, + "data": ArtifactTypeScrollData, } diff --git a/internal/core/domain/scroll_lock.go b/internal/core/domain/scroll_lock.go deleted file mode 100644 index 16837055..00000000 --- a/internal/core/domain/scroll_lock.go +++ /dev/null @@ -1,76 +0,0 @@ -package domain - -import ( - "encoding/json" - "os" - "time" - - "github.com/Masterminds/semver/v3" -) - -type ScrollLockStatus string - -const ( - ScrollLockStatusRunning ScrollLockStatus = "running" - ScrollLockStatusDone ScrollLockStatus = "done" - ScrollLockStatusError ScrollLockStatus = "error" - ScrollLockStatusWaiting ScrollLockStatus = "waiting" -) - -type LockStatus struct { - Status ScrollLockStatus `json:"status"` - ExitCode *int `json:"exit_code"` - LastStatusChange int64 `json:"last_status_change"` -} - -type ScrollLock struct { - Statuses map[string]LockStatus `json:"statuses"` - ScrollVersion *semver.Version `json:"scroll_version"` - ScrollName string `json:"scroll_name"` - path string -} // @name ScrollLock - -func 
(scrollLock *ScrollLock) Write() error { - data, err := json.Marshal(scrollLock) - if err != nil { - return err - } - os.WriteFile(scrollLock.path, data, 0755) - return nil -} - -func ReadLock(path string) (*ScrollLock, error) { - lock := &ScrollLock{} - - scrollRaw, _ := os.ReadFile(path) - err := json.Unmarshal(scrollRaw, &lock) - if err != nil { - return nil, err - } - lock.path = path - return lock, nil -} - -func WriteNewScrollLock(path string) *ScrollLock { - lock := &ScrollLock{ - Statuses: make(map[string]LockStatus), - path: path, - } - lock.Write() - return lock -} - -func (scrollLock *ScrollLock) GetStatus(command string) LockStatus { - return scrollLock.Statuses[command] -} - -func (scrollLock *ScrollLock) SetStatus(command string, status ScrollLockStatus, exitCode *int) { - lockStatus := LockStatus{ - Status: status, - LastStatusChange: time.Now().Unix(), - ExitCode: exitCode, - } - - scrollLock.Statuses[command] = lockStatus - scrollLock.Write() -} diff --git a/internal/core/domain/scroll_test.go b/internal/core/domain/scroll_test.go new file mode 100644 index 00000000..63b9fd30 --- /dev/null +++ b/internal/core/domain/scroll_test.go @@ -0,0 +1,124 @@ +package domain + +import ( + "strings" + "testing" + + semver "github.com/Masterminds/semver/v3" +) + +func TestProcedureDefaultsToContainer(t *testing.T) { + procedure := &Procedure{ + Image: "alpine:3.20", + Command: []string{"echo", "hello"}, + } + + if procedure.Kind() != ProcedureTypeContainer { + t.Fatalf("Kind() = %s, want %s", procedure.Kind(), ProcedureTypeContainer) + } + + scroll := testScroll(t, procedure) + if err := scroll.Validate(false); err != nil { + t.Fatalf("Validate() error = %v", err) + } +} + +func TestSignalProcedureValidation(t *testing.T) { + scroll := testScroll(t, &Procedure{ + Type: ProcedureTypeSignal, + Target: "start", + Signal: "SIGTERM", + }) + + if err := scroll.Validate(false); err != nil { + t.Fatalf("Validate() error = %v", err) + } +} + +func 
TestLegacyProcedureFieldsRejected(t *testing.T) { + tests := []struct { + name string + procedure *Procedure + want string + }{ + { + name: "mode", + procedure: &Procedure{Mode: "scroll-switch"}, + want: "legacy mode", + }, + { + name: "wait", + procedure: &Procedure{Image: "alpine:3.20", Wait: false}, + want: "legacy wait", + }, + { + name: "data", + procedure: &Procedure{Image: "alpine:3.20", Data: "start"}, + want: "legacy data", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scroll := testScroll(t, tt.procedure) + err := scroll.Validate(false) + if err == nil { + t.Fatal("Validate() error = nil, want error") + } + if !strings.Contains(err.Error(), tt.want) { + t.Fatalf("Validate() error = %q, want containing %q", err.Error(), tt.want) + } + }) + } +} + +func TestScrollValidateAllowsMissingServe(t *testing.T) { + scroll := testScroll(t, &Procedure{ + Image: "alpine:3.20", + Command: []string{"true"}, + }) + scroll.Serve = "" + + if err := scroll.Validate(false); err != nil { + t.Fatalf("Validate() error = %v", err) + } +} + +func TestScrollValidateRejectsUnknownServeCommand(t *testing.T) { + scroll := testScroll(t, &Procedure{ + Image: "alpine:3.20", + Command: []string{"true"}, + }) + scroll.Serve = "missing" + + err := scroll.Validate(false) + if err == nil { + t.Fatal("Validate() error = nil, want error") + } + if !strings.Contains(err.Error(), "serve command missing is not defined") { + t.Fatalf("Validate() error = %q, want missing serve command", err.Error()) + } +} + +func testScroll(t *testing.T, procedure *Procedure) *Scroll { + t.Helper() + version, err := semver.NewVersion("0.1.0") + if err != nil { + t.Fatal(err) + } + return &Scroll{ + File: File{ + Name: "test-scroll", + Desc: "test scroll", + Version: version, + AppVersion: "1.0.0", + Serve: "start", + Commands: map[string]*CommandInstructionSet{ + "start": { + Procedures: []*Procedure{procedure}, + }, + }, + }, + scrollDir: t.TempDir(), + } +} diff --git 
a/internal/core/ports/handler_ports.go b/internal/core/ports/handler_ports.go deleted file mode 100644 index fed72c70..00000000 --- a/internal/core/ports/handler_ports.go +++ /dev/null @@ -1,66 +0,0 @@ -package ports - -import ( - "github.com/gofiber/contrib/websocket" - "github.com/gofiber/fiber/v2" -) - -type ScrollHandlerInterface interface { - GetScroll(c *fiber.Ctx) error - RunCommand(c *fiber.Ctx) error - RunProcedure(c *fiber.Ctx) error - GetProcedures(c *fiber.Ctx) error - AddCommand(c *fiber.Ctx, command string) error -} - -type ScrollLogHandlerInterface interface { - ListAllLogs(c *fiber.Ctx) error - ListStreamLogs(c *fiber.Ctx, stream string) error -} - -type ScrollMetricHandlerInterface interface { - GetMetrics(c *fiber.Ctx) error - GetPsTree(c *fiber.Ctx) error -} - -type AnnotationHandlerInterface interface { - Annotations(c *fiber.Ctx) error -} - -type WebsocketHandlerInterface interface { - CreateToken(c *fiber.Ctx) error - HandleProcess(c *websocket.Conn) - GetConsoles(c *fiber.Ctx) error -} - -type ProcessHandlerInterface interface { - GetProcesses(c *fiber.Ctx) error -} - -type QueueHandlerInterface interface { - GetQueue(c *fiber.Ctx) error -} - -type PortHandlerInterface interface { - GetPorts(c *fiber.Ctx) error - AddPort(c *fiber.Ctx) error - DeletePort(c *fiber.Ctx, port int) error -} -type HealthHandlerInterface interface { - GetHealthAuth(c *fiber.Ctx) error -} - -type ColdstarterHandlerInterface interface { - FinishColdstarter(c *fiber.Ctx) error -} - -type SignalHandlerInterface interface { - StopDaemon(c *fiber.Ctx) error -} - -type WatchHandlerInterface interface { - EnableWatch(c *fiber.Ctx) error - DisableWatch(c *fiber.Ctx) error - GetWatchStatus(c *fiber.Ctx) error - NotifyChange(c *websocket.Conn) -} diff --git a/internal/core/ports/services_ports.go b/internal/core/ports/services_ports.go index df075332..c44fb376 100644 --- a/internal/core/ports/services_ports.go +++ b/internal/core/ports/services_ports.go @@ -19,41 +19,46 @@ 
type AuthorizerServiceInterface interface { type ScrollServiceInterface interface { GetCurrent() *domain.Scroll GetFile() *domain.File - GetScrollConfigRawYaml() []byte GetDir() string GetCwd() string - WriteNewScrollLock() *domain.ScrollLock - GetLock() (*domain.ScrollLock, error) GetCommand(cmd string) (*domain.CommandInstructionSet, error) - AddTemporaryCommand(cmd string, instructions *domain.CommandInstructionSet) } type ProcedureLauchnerInterface interface { - LaunchPlugins() error - RunProcedure(*domain.Procedure, string, []string) (string, *int, error) - Run(cmd string, runCommandCb func(cmd string) error) error + Run(cmd string) error GetProcedureStatuses() map[string]domain.ScrollLockStatus } -type PluginManagerInterface interface { - CanRunStandaloneProcedure(mode string) bool - GetNotifyConsoleChannel() chan *domain.StreamItem - ParseFromScroll(pluginDefinitionMap map[string]map[string]string, config string, cwd string) error - HasMode(mode string) bool - RunProcedure(mode string, value string) (string, error) -} - type LogManagerInterface interface { GetStreams() map[string]*domain.Log AddLine(stream string, sc []byte) } -type ProcessManagerInterface interface { - GetRunningProcesses() map[string]*domain.Process - GetRunningProcess(commandName string) *domain.Process - Run(commandName string, command []string, dir string) (*int, error) - RunTty(comandName string, command []string, dir string) (*int, error) - WriteStdin(process *domain.Process, data string) error +type RuntimeBackendInterface interface { + Name() string + ReadScrollFile(scrollRoot string) ([]byte, error) + RunCommand(command RuntimeCommand) (*int, error) + ExpectedPorts(dataRoot string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimePortStatus, error) + Attach(commandName string, data string) error + Signal(commandName string, target string, signal string, dataRoot string) error +} + +type RuntimeCommand struct { + Name string + Command 
*domain.CommandInstructionSet + DataRoot string + GlobalPorts []domain.Port +} + +type RuntimeMaterialization struct { + Artifact string + ScrollRoot string + DataRoot string + ScrollYAML []byte +} + +type RuntimeMaterializerInterface interface { + MaterializeScroll(ctx context.Context, artifact string, requestedName string) (*RuntimeMaterialization, error) } type BroadcastChannelInterface interface { @@ -67,18 +72,6 @@ type ConsoleManagerInterface interface { AddConsoleWithChannel(consoleId string, consoleType domain.ConsoleType, inputMode string, channel chan string) (*domain.Console, chan struct{}) } -type ProcessMonitorInterface interface { - GetAllProcessesMetrics() map[string]*domain.ProcessMonitorMetrics - GetPsTrees() map[string]*domain.ProcessTreeRoot - AddProcess(pid int32, name string) - RemoveProcess(name string) -} - -type TemplateRendererInterface interface { - RenderTemplate(templatePath string, data interface{}) (string, error) - RenderScrollTemplateFiles(templateBase string, templateFiles []string, data interface{}, ouputPath string) error -} - type OciRegistryInterface interface { GetRepo(repoUrl string) (*remote.Repository, error) Pull(dir string, artifact string) error @@ -100,13 +93,7 @@ type QueueManagerInterface interface { } type PortServiceInterface interface { - StartMonitoring(context.Context, []string, uint) - GetLastActivity(port int) uint - CheckOpen(prot int) bool GetPorts() []*domain.AugmentedPort - MandatoryPortsOpen() bool - AddPort(port domain.Port) (*domain.AugmentedPort, error) - RemovePort(port int) error } type ColdStarterHandlerInterface interface { @@ -143,8 +130,3 @@ type WatchServiceInterface interface { IsWatching() bool SetHotReloadCommands(procs []string) error } - -type NixDependencyServiceInterface interface { - GetCommand(cmd []string, deps []string) []string - EnsureNixInstalled() error -} diff --git a/internal/core/services/coldstarter.go b/internal/core/services/coldstarter.go index fc1d5803..bf7265ed 100644 --- 
a/internal/core/services/coldstarter.go +++ b/internal/core/services/coldstarter.go @@ -3,6 +3,7 @@ package services import ( "context" "fmt" + "path/filepath" "sync" "time" @@ -89,7 +90,7 @@ func (c *ColdStarter) Serve(ctx context.Context) { sleepHandler = *port.SleepHandler } - path := fmt.Sprintf("%s/%s", c.dir, sleepHandler) + path := filepath.Join(c.dir, domain.ScrollDataDir, "coldstart", sleepHandler) go func(port *domain.AugmentedPort) { var handler ports.ColdStarterHandlerInterface @@ -121,7 +122,7 @@ func (c *ColdStarter) Serve(ctx context.Context) { c.handlerMu.Lock() defer c.handlerMu.Unlock() c.handler[port.Name] = udpServer - } else if port.Protocol == "tcp" { + } else if port.Protocol == "tcp" || port.Protocol == "http" || port.Protocol == "https" || port.Protocol == "" { logger.Log().Info(fmt.Sprintf("Starting TCP server on port %d", port.Port.Port)) tcpServer := servers.NewTCP(handler) err := tcpServer.Start(port.Port.Port, finishFunc) diff --git a/internal/core/services/nix_dependency_service.go b/internal/core/services/nix_dependency_service.go deleted file mode 100644 index eb9a8439..00000000 --- a/internal/core/services/nix_dependency_service.go +++ /dev/null @@ -1,34 +0,0 @@ -package services - -import ( - "fmt" - "os/exec" - - "al.essio.dev/pkg/shellescape" - "github.com/highcard-dev/daemon/internal/core/ports" -) - -type NixDependencyService struct{} - -func NewNixDependencyService() *NixDependencyService { return &NixDependencyService{} } - -func (s *NixDependencyService) EnsureNixInstalled() error { - if _, err := exec.LookPath("nix-shell"); err != nil { - return fmt.Errorf("nix-shell not found in PATH; install Nix from https://nixos.org/download and ensure 'nix-shell' is available: %w", err) - } - return nil -} - -func (s *NixDependencyService) GetCommand(cmd []string, deps []string) []string { - - var cmds = []string{"nix-shell"} - for _, dep := range deps { - cmds = append(cmds, "-p", dep) - } - cmds = append(cmds, "--command", 
shellescape.QuoteCommand(cmd)) - - return cmds - -} - -var _ ports.NixDependencyServiceInterface = (*NixDependencyService)(nil) diff --git a/internal/core/services/nix_dependency_service_test.go b/internal/core/services/nix_dependency_service_test.go deleted file mode 100644 index 4cdd866b..00000000 --- a/internal/core/services/nix_dependency_service_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package services - -import ( - "testing" -) - -func TestEnsureNixInstalled(t *testing.T) { - svc := NewNixDependencyService() - err := svc.EnsureNixInstalled() - // We accept either outcome because CI/dev environment may or may not have nix. - if err != nil { - t.Logf("nix-shell not found (acceptable if Nix not installed): %v", err) - } -} - -func TestGetCommandPassthrough(t *testing.T) { - svc := NewNixDependencyService() - in := []string{"echo", "hello"} - deps := []string{"nodejs", "python3"} - out := svc.GetCommand(in, deps) - expected := []string{"nix-shell", "-p", "nodejs", "-p", "python3", "--command", "echo hello"} - for i, v := range expected { - if out[i] != v { - t.Errorf("expected out[%d] = %s got %s", i, v, out[i]) - } - } -} diff --git a/internal/core/services/plugin_manager.go b/internal/core/services/plugin_manager.go deleted file mode 100644 index 4d521b46..00000000 --- a/internal/core/services/plugin_manager.go +++ /dev/null @@ -1,196 +0,0 @@ -package services - -import ( - "crypto/sha256" - "errors" - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/utils/logger" - commons "github.com/highcard-dev/daemon/plugin" - - "github.com/hashicorp/go-plugin" -) - -type NotifcationHandler struct { - broadcast chan *domain.StreamItem -} - -func (n *NotifcationHandler) NotifyConsole(mode string, data string) error { - go func() { - - item := domain.StreamItem{ - Stream: mode, - Data: data, - } - n.broadcast <- &item - }() - - return nil -} - -type PluginManager struct { - 
Modes map[string]string - plugins map[string]commons.DruidPluginInterface - allowedStandaloneModes []string - NotifyConsole chan *domain.StreamItem -} - -func NewPluginManager() *PluginManager { - return &PluginManager{ - Modes: make(map[string]string), - plugins: make(map[string]commons.DruidPluginInterface), - NotifyConsole: make(chan *domain.StreamItem), - } -} - -func (pm *PluginManager) GetNotifyConsoleChannel() chan *domain.StreamItem { - return pm.NotifyConsole -} - -func (pm *PluginManager) ParseFromScroll(pluginDefinitionMap map[string]map[string]string, config string, cwd string) error { - for pluginName, pluginDefinition := range pluginDefinitionMap { - p, err := pm.LoadGoPlugin(pluginName) - if err != nil { - return err - } - n := NotifcationHandler{ - broadcast: pm.NotifyConsole, - } - err = p.Init(pluginDefinition, &n, cwd, config) - if err != nil { - return fmt.Errorf("error initializing plugin %s: %s", pluginName, err.Error()) - } - modes, _ := p.GetModes() - pm.plugins[pluginName] = p - for _, mode := range modes { - if mode.Standalone { - pm.AddStandaloneMode(mode.Mode) - } - pm.Modes[mode.Mode] = pluginName - } - } - - return nil -} - -func (pm *PluginManager) HasMode(mode string) bool { - if _, ok := pm.Modes[mode]; ok { - return true - } - return false -} - -func (pm *PluginManager) RunProcedure(mode string, value string) (string, error) { - if pm.HasMode(mode) { - return pm.plugins[pm.Modes[mode]].RunProcedure(mode, value) - } else { - return "", errors.New("mode not suported by any plugin") - } -} - -func (pm *PluginManager) LoadGoPlugin(name string) (commons.DruidPluginInterface, error) { - - var handshakeConfig = plugin.HandshakeConfig{ - ProtocolVersion: 1, - MagicCookieKey: "DRUID_PLUGIN", - MagicCookieValue: "druid_is_the_way", - } - - // pluginMap is the map of plugins we can dispense. 
- var pluginMap = map[string]plugin.Plugin{ - "rcon": &commons.DruidRpcPlugin{}, - "rcon_web_rust": &commons.DruidRpcPlugin{}, - } - - ex, err := os.Executable() - if err != nil { - panic(err) - } - exPath := filepath.Dir(ex) - var path string - _, err = os.Stat(exPath + "/druid_" + name) - if err == nil { - path = exPath + "/druid_" + name - } else { - path = "./druid_" + name - } - - var cmd *exec.Cmd - - if os.Getenv("DRUID_DEBUG_PATH") != "" { - cmd = exec.Command("/bin/sh", os.Getenv("DRUID_DEBUG_PATH"), path) - } else { - cmd = exec.Command(path) - } - // This doesn't add more security than before - // but removes the SecureConfig is nil warning. - pluginChecksum, err := getPluginExecutableChecksum(path) - if err != nil { - return nil, fmt.Errorf("unable to generate a checksum for the plugin %s: %w", path, err) - } - - // We're a host! Start by launching the plugin process. - client := plugin.NewClient(&plugin.ClientConfig{ - HandshakeConfig: handshakeConfig, - Plugins: pluginMap, - Cmd: cmd, - Logger: logger.Hclog2ZapLogger{Zap: logger.Log()}, - AllowedProtocols: []plugin.Protocol{ - plugin.ProtocolNetRPC, plugin.ProtocolGRPC}, - SecureConfig: &plugin.SecureConfig{ - Checksum: pluginChecksum, - Hash: sha256.New(), - }, - }) - //defer client.Kill() - - // Connect via RPC - rpcClient, err := client.Client() - if err != nil { - return nil, err - } - - // Request the plugin - raw, err := rpcClient.Dispense(name) - if err != nil { - return nil, err - } - rpcConnection := raw.(commons.DruidPluginInterface) - - return rpcConnection, nil -} - -func (pm *PluginManager) CanRunStandaloneProcedure(mode string) bool { - for _, standaloneMode := range pm.allowedStandaloneModes { - if standaloneMode == mode { - return true - } - } - return false -} - -func (pm *PluginManager) AddStandaloneMode(mode string) { - pm.allowedStandaloneModes = append(pm.allowedStandaloneModes, mode) -} -func getPluginExecutableChecksum(executablePath string) ([]byte, error) { - pathHash := 
sha256.New() - file, err := os.Open(executablePath) - - if err != nil { - return nil, err - } - - defer file.Close() - - _, err = io.Copy(pathHash, file) - if err != nil { - return nil, err - } - - return pathHash.Sum(nil), nil -} diff --git a/internal/core/services/port_service.go b/internal/core/services/port_service.go index 9b597367..ed31d5a3 100644 --- a/internal/core/services/port_service.go +++ b/internal/core/services/port_service.go @@ -1,372 +1,52 @@ package services import ( - "context" "fmt" "os" "strconv" "strings" "time" - "github.com/gopacket/gopacket" - "github.com/gopacket/gopacket/layers" "github.com/highcard-dev/daemon/internal/core/domain" "github.com/highcard-dev/daemon/internal/utils/logger" - pcap "github.com/packetcap/go-pcap" - "github.com/shirou/gopsutil/net" "go.uber.org/zap" ) -type PortMonitor struct { - ports []*domain.AugmentedPort - portPoolInterval time.Duration +type PortService struct { + ports []*domain.AugmentedPort } -func NewPortServiceWithScrollFile( - file *domain.File, -) *PortMonitor { - p := &PortMonitor{ - portPoolInterval: 5 * time.Second, - } +func NewPortServiceWithScrollFile(file *domain.File) *PortService { + p := &PortService{} p.SyncPortEnv(file) return p } -func NewPortService(ports []int) *PortMonitor { - ap := make([]*domain.AugmentedPort, len(ports)) - - for idx, port := range ports { - ap[idx] = &domain.AugmentedPort{ - Port: domain.Port{ - Name: fmt.Sprintf("port%d", port), - Port: port, - }, - InactiveSince: time.Now(), - InactiveSinceSec: 0, - } - } - - p := &PortMonitor{ - ports: ap, - portPoolInterval: 5 * time.Second, - } - return p -} - -func (p *PortMonitor) SyncPortEnv(file *domain.File) []*domain.AugmentedPort { - ports := file.Ports - +func (p *PortService) SyncPortEnv(file *domain.File) []*domain.AugmentedPort { var augmentedPorts []*domain.AugmentedPort - - for _, port := range ports { - - //TODO: get rid of this and set this directly in scroll.yaml, when templating is implemented + for _, port 
:= range file.Ports { portEnvName := fmt.Sprintf("DRUID_PORT_%s", strings.ToUpper(port.Name)) envPort := os.Getenv(portEnvName) - if envPort != "" && port.Port == 0 { portInt, err := strconv.Atoi(envPort) if err == nil { port.Port = portInt } } - if port.Port == 0 { - logger.Log().Warn("Could no find port number for port", zap.String("port", port.Name)) + logger.Log().Warn("Could not find port number for port", zap.String("port", port.Name)) continue } - augmentedPorts = append(augmentedPorts, &domain.AugmentedPort{ Port: port, InactiveSince: time.Now(), }) os.Setenv(portEnvName, strconv.Itoa(port.Port)) } - p.ports = augmentedPorts return p.ports } -func (p *PortMonitor) GetLastActivity(port int) uint { - for _, p := range p.ports { - if p.Port.Port == port { - return uint(time.Since(p.InactiveSince).Seconds()) - } - } - - return 0 -} - -func (po *PortMonitor) GetPorts() []*domain.AugmentedPort { - for _, p := range po.ports { - p.Open = po.CheckOpen(p.Port.Port) - - inactiveCorrected := time.Since(p.InactiveSince) - po.portPoolInterval - if inactiveCorrected < 0 { - p.InactiveSinceSec = 0 - } else { - p.InactiveSinceSec = uint(inactiveCorrected.Seconds()) - } - } - - return po.ports -} - -func (p *PortMonitor) ResetOpenPorts() { - for _, p := range p.ports { - p.InactiveSince = time.Now() - } -} - -func (p *PortMonitor) GetPort(port int) *domain.AugmentedPort { - for _, p := range p.ports { - if p.Port.Port == port { - return p - } - } - return nil -} - -func (p *PortMonitor) AddPort(port domain.Port) (*domain.AugmentedPort, error) { - // Validate port range - if port.Port < 1 || port.Port > 65535 { - return nil, fmt.Errorf("port number must be between 1 and 65535, got %d", port.Port) - } - - // Validate protocol - protocol := strings.ToLower(port.Protocol) - if protocol != "tcp" && protocol != "udp" { - return nil, fmt.Errorf("protocol must be 'tcp' or 'udp', got '%s'", port.Protocol) - } - port.Protocol = protocol - - // Check for duplicate port number - for _, 
existingPort := range p.ports { - if existingPort.Port.Port == port.Port { - return nil, fmt.Errorf("port %d is already being watched", port.Port) - } - } - - augmentedPort := &domain.AugmentedPort{ - Port: port, - InactiveSince: time.Now(), - } - - p.ports = append(p.ports, augmentedPort) - return augmentedPort, nil -} - -func (p *PortMonitor) RemovePort(port int) error { - for i, existingPort := range p.ports { - if existingPort.Port.Port == port { - p.ports = append(p.ports[:i], p.ports[i+1:]...) - return nil - } - } - return fmt.Errorf("port %d not found", port) -} - -func (p *PortMonitor) MandatoryPortsOpen() bool { - augmentedPorts := p.GetPorts() - - for _, port := range augmentedPorts { - if port.Mandatory && !port.Open { - logger.Log().Warn("Mandatory port not open", zap.String("port", port.Port.Name), zap.Int("portnum", port.Port.Port)) - return false - } - } - return true -} - -func (p *PortMonitor) CheckOpen(port int) bool { - //check if port is open - - connections, err := net.Connections("inet") - if err != nil { - return false - } - - for _, conn := range connections { - if conn.Laddr.Port == uint32(port) { - return true - } - } - return false -} - -func (p *PortMonitor) WaitForConnection(ifaces []string, ppm uint) error { - - var ports []int - for _, port := range p.ports { - if port.Port.CheckActivity { - ports = append(ports, port.Port.Port) - } - } - - if len(ports) == 0 { - return fmt.Errorf("no ports to monitor") - } - logger.Log().Info("Starting port monitoring", - zap.Ints("ports", ports), - zap.Strings("ifaces", ifaces), - zap.Uint("ppm", ppm), - ) - for { - - firstOnlinePort := p.StartMonitorPorts(ports, ifaces, 5*time.Minute, ppm) - - if firstOnlinePort == nil { - continue - } - - for _, port := range p.ports { - //this is not right but sufficient for now, later we should only update one port - port.InactiveSince = time.Now() - } - - time.Sleep(p.portPoolInterval) - } -} - -func (p *PortMonitor) StartMonitoring(ctx context.Context, ifaces 
[]string, ppm uint) { - //start monitoring the ports - for { - select { - case <-ctx.Done(): - return - default: - err := p.WaitForConnection(ifaces, ppm) - if err != nil { - logger.Log().Error("Error while waiting for connection", zap.Error(err)) - return - } - } - } -} - -func (p *PortMonitor) StartMonitorPorts(ports []int, ifaces []string, timeout time.Duration, ppm uint) *int { - - // Find all network interfaces - - logger.Log().Debug("Found interfaces", zap.Strings("ifaces", ifaces), zap.Strings("requestedInterfaces", ifaces)) - - ctx, cancel := context.WithTimeout(context.Background(), timeout) - - var doneIface string - var donePort int - - for _, iface := range ifaces { - go func(po []int, i string) { - port, err := p.waitForPortActiviy(ctx, ports, i, ppm) - if err != nil { - logger.Log().Error("Error on port monitoring", zap.String("iface", i), zap.Ints("ports", po), zap.Error(err)) - return - } - - if port == 0 { - return - } - donePort = port - doneIface = i - cancel() - }(ports, iface) - } - - <-ctx.Done() - - //this is not needed, but it's a good practice to call it - cancel() - - if doneIface != "" { - logger.Log().Debug("Port activity found", zap.String("iface", doneIface), zap.Int("port", donePort)) - return &donePort - } else { - logger.Log().Debug("No port activity found on any interface\n") - return nil - } - -} -func (p *PortMonitor) waitForPortActiviy(ctx context.Context, ports []int, interfaceName string, ppm uint) (int, error) { - - handle, err := pcap.OpenLive(interfaceName, 1600, true, time.Hour, false) - if err != nil { - return 0, err - } - - go func() { - <-ctx.Done() - logger.Log().Debug("Closing handle ", zap.String("iface", interfaceName), zap.Ints("ports", ports)) - handle.Close() - }() - - portFilterParts := make([]string, len(ports)) - - for idx, port := range ports { - portFilterParts[idx] = fmt.Sprintf("port %d", port) - } - - filter := strings.Join(portFilterParts, " or ") - - err = handle.SetBPFFilter(filter) - if err != nil { 
- return 0, err - } - logger.Log().Debug("Listening on iface", zap.String("iface", interfaceName), zap.Ints("ports", ports)) - - lt1 := layers.LinkType(handle.LinkType()) - packetSource := gopacket.NewPacketSource(handle, lt1) - - // Introduce a ticker to reset packet count every minute - packetCount := 0 - ticker := time.NewTicker(time.Minute) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return 0, nil - case packet := <-packetSource.Packets(): - if packet == nil { - continue - } - - // Process the packet and check if it has an application layer - if packet.ApplicationLayer() == nil { - continue - } - - var packetPort = 0 - - if transportLayer := packet.TransportLayer(); transportLayer != nil { - packetPortStr := transportLayer.TransportFlow().Dst().String() - packetPort, err = strconv.Atoi(packetPortStr) - if err != nil { - packetPort = 0 - } - } - - var srcIP, dstIP string - if netLayer := packet.NetworkLayer(); netLayer != nil { - srcIP = netLayer.NetworkFlow().Src().String() - dstIP = netLayer.NetworkFlow().Dst().String() - } - - logger.Log().Debug("Packet found on iface", - zap.String("iface", interfaceName), zap.Int("port", packetPort), - zap.String("srcIP", srcIP), zap.String("dstIP", dstIP), - ) - - // Increment packet count - packetCount++ - - // Check if we have reached the packets per minute threshold - if packetCount >= int(ppm) { - logger.Log().Info("PPM threshhold reached", zap.String("iface", interfaceName), zap.Int("ppm", int(ppm))) - return packetPort, nil - } - case <-ticker.C: - // Reset packet count every minute - packetCount = 0 - } - } +func (p *PortService) GetPorts() []*domain.AugmentedPort { + return p.ports } diff --git a/internal/core/services/port_service_test.go b/internal/core/services/port_service_test.go deleted file mode 100644 index cd23df7a..00000000 --- a/internal/core/services/port_service_test.go +++ /dev/null @@ -1,240 +0,0 @@ -package services_test - -import ( - "testing" - - 
"github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/core/services" -) - -func TestPortMonitor_AddPort_Success(t *testing.T) { - pm := services.NewPortService([]int{}) - - port := domain.Port{ - Port: 8080, - Protocol: "tcp", - Name: "http", - } - - result, err := pm.AddPort(port) - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - if result.Port.Port != 8080 { - t.Errorf("Expected port 8080, got %d", result.Port.Port) - } - if result.Port.Protocol != "tcp" { - t.Errorf("Expected protocol tcp, got %s", result.Port.Protocol) - } - if result.Port.Name != "http" { - t.Errorf("Expected name http, got %s", result.Port.Name) - } - if result.InactiveSince.IsZero() { - t.Error("Expected InactiveSince to be set") - } - - ports := pm.GetPorts() - if len(ports) != 1 { - t.Errorf("Expected 1 port, got %d", len(ports)) - } -} - -func TestPortMonitor_AddPort_InvalidPortRange(t *testing.T) { - pm := services.NewPortService([]int{}) - - tests := []struct { - name string - port int - }{ - {"port zero", 0}, - {"negative port", -1}, - {"port too high", 65536}, - {"port way too high", 100000}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, err := pm.AddPort(domain.Port{ - Port: tt.port, - Protocol: "tcp", - Name: "test", - }) - if err == nil { - t.Errorf("Expected error for port %d, got nil", tt.port) - } - }) - } -} - -func TestPortMonitor_AddPort_InvalidProtocol(t *testing.T) { - pm := services.NewPortService([]int{}) - - tests := []struct { - name string - protocol string - }{ - {"empty protocol", ""}, - {"invalid protocol", "http"}, - {"icmp", "icmp"}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, err := pm.AddPort(domain.Port{ - Port: 8080, - Protocol: tt.protocol, - Name: "test", - }) - if err == nil { - t.Errorf("Expected error for protocol '%s', got nil", tt.protocol) - } - }) - } -} - -func TestPortMonitor_AddPort_ValidProtocols(t *testing.T) { - tests 
:= []struct { - name string - protocol string - }{ - {"tcp lowercase", "tcp"}, - {"udp lowercase", "udp"}, - {"TCP uppercase", "TCP"}, - {"UDP uppercase", "UDP"}, - {"Tcp mixed case", "Tcp"}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - pm := services.NewPortService([]int{}) - result, err := pm.AddPort(domain.Port{ - Port: 8080, - Protocol: tt.protocol, - Name: "test", - }) - if err != nil { - t.Errorf("Expected no error for protocol '%s', got %v", tt.protocol, err) - } - if result.Port.Protocol != "tcp" && result.Port.Protocol != "udp" { - t.Errorf("Expected protocol to be normalized, got '%s'", result.Port.Protocol) - } - }) - } -} - -func TestPortMonitor_AddPort_Duplicate(t *testing.T) { - pm := services.NewPortService([]int{8080}) - - _, err := pm.AddPort(domain.Port{ - Port: 8080, - Protocol: "tcp", - Name: "duplicate", - }) - if err == nil { - t.Error("Expected error for duplicate port, got nil") - } -} - -func TestPortMonitor_AddPort_MultipleDifferentPorts(t *testing.T) { - pm := services.NewPortService([]int{}) - - _, err := pm.AddPort(domain.Port{Port: 8080, Protocol: "tcp", Name: "http"}) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - - _, err = pm.AddPort(domain.Port{Port: 443, Protocol: "tcp", Name: "https"}) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - - _, err = pm.AddPort(domain.Port{Port: 27015, Protocol: "udp", Name: "game"}) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - - ports := pm.GetPorts() - if len(ports) != 3 { - t.Errorf("Expected 3 ports, got %d", len(ports)) - } -} - -func TestPortMonitor_RemovePort_Success(t *testing.T) { - pm := services.NewPortService([]int{8080, 443}) - - err := pm.RemovePort(8080) - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - ports := pm.GetPorts() - if len(ports) != 1 { - t.Errorf("Expected 1 port, got %d", len(ports)) - } - if ports[0].Port.Port != 443 { - t.Errorf("Expected remaining port 443, got %d", 
ports[0].Port.Port) - } -} - -func TestPortMonitor_RemovePort_NotFound(t *testing.T) { - pm := services.NewPortService([]int{8080}) - - err := pm.RemovePort(9999) - if err == nil { - t.Error("Expected error for non-existent port, got nil") - } -} - -func TestPortMonitor_RemovePort_EmptyList(t *testing.T) { - pm := services.NewPortService([]int{}) - - err := pm.RemovePort(8080) - if err == nil { - t.Error("Expected error for removing from empty list, got nil") - } -} - -func TestPortMonitor_RemovePort_LastPort(t *testing.T) { - pm := services.NewPortService([]int{8080}) - - err := pm.RemovePort(8080) - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - ports := pm.GetPorts() - if len(ports) != 0 { - t.Errorf("Expected 0 ports, got %d", len(ports)) - } -} - -func TestPortMonitor_AddThenRemove(t *testing.T) { - pm := services.NewPortService([]int{}) - - _, err := pm.AddPort(domain.Port{Port: 8080, Protocol: "tcp", Name: "http"}) - if err != nil { - t.Fatalf("Unexpected error adding port: %v", err) - } - - err = pm.RemovePort(8080) - if err != nil { - t.Fatalf("Unexpected error removing port: %v", err) - } - - ports := pm.GetPorts() - if len(ports) != 0 { - t.Errorf("Expected 0 ports after add and remove, got %d", len(ports)) - } - - // Should be able to add again after removing - _, err = pm.AddPort(domain.Port{Port: 8080, Protocol: "tcp", Name: "http"}) - if err != nil { - t.Fatalf("Unexpected error re-adding port: %v", err) - } - - ports = pm.GetPorts() - if len(ports) != 1 { - t.Errorf("Expected 1 port after re-add, got %d", len(ports)) - } -} diff --git a/internal/core/services/procedure_launcher.go b/internal/core/services/procedure_launcher.go index 96325cda..27d9b48b 100644 --- a/internal/core/services/procedure_launcher.go +++ b/internal/core/services/procedure_launcher.go @@ -2,75 +2,37 @@ package services import ( "errors" - "fmt" "sync" - "time" "github.com/highcard-dev/daemon/internal/core/domain" 
"github.com/highcard-dev/daemon/internal/core/ports" - "github.com/highcard-dev/daemon/internal/utils" "github.com/highcard-dev/daemon/internal/utils/logger" "go.uber.org/zap" ) type ProcedureLauncher struct { - pluginManager ports.PluginManagerInterface - processManager ports.ProcessManagerInterface - ociRegistry ports.OciRegistryInterface - consoleManager ports.ConsoleManagerInterface - logManager ports.LogManagerInterface - scrollService ports.ScrollServiceInterface - nixDependencyService ports.NixDependencyServiceInterface - procedures map[string]domain.ScrollLockStatus - proceduresMutex *sync.Mutex + runtimeBackend ports.RuntimeBackendInterface + runtimeDataRoot string + scrollService ports.ScrollServiceInterface + procedures map[string]domain.ScrollLockStatus + proceduresMutex *sync.Mutex } func NewProcedureLauncher( - ociRegistry ports.OciRegistryInterface, - processManager ports.ProcessManagerInterface, - pluginManager ports.PluginManagerInterface, - consoleManager ports.ConsoleManagerInterface, - logManager ports.LogManagerInterface, scrollService ports.ScrollServiceInterface, - dependecyResolution string, + runtimeBackend ports.RuntimeBackendInterface, + runtimeDataRoot string, ) (*ProcedureLauncher, error) { - var nixDependencyService ports.NixDependencyServiceInterface = nil - - switch dependecyResolution { - case "nix": - logger.Log().Info("Using Nix for dependency resolution") - nixDependencyService = NewNixDependencyService() - err := nixDependencyService.EnsureNixInstalled() - if err != nil { - return nil, err - } - case "external": - logger.Log().Info("Using external system for dependency resolution") - default: - logger.Log().Warn("Unknown dependency resolution strategy, falling back to 'auto'", zap.String("dependecyResolution", dependecyResolution)) - fallthrough - case "auto": - logger.Log().Info("Using automatic dependency resolution") - nixDependencyService = NewNixDependencyService() - err := nixDependencyService.EnsureNixInstalled() - if err 
!= nil { - logger.Log().Info("Nix not found, falling back to external system for dependency resolution") - nixDependencyService = nil - } else { - logger.Log().Info("Nix found, using Nix for dependency resolution") - } + if runtimeBackend == nil { + return nil, errors.New("runtime backend is required") } s := &ProcedureLauncher{ - processManager: processManager, - ociRegistry: ociRegistry, - pluginManager: pluginManager, - consoleManager: consoleManager, - logManager: logManager, - scrollService: scrollService, - procedures: make(map[string]domain.ScrollLockStatus), - proceduresMutex: &sync.Mutex{}, - nixDependencyService: nixDependencyService, + runtimeBackend: runtimeBackend, + runtimeDataRoot: runtimeDataRoot, + scrollService: scrollService, + procedures: make(map[string]domain.ScrollLockStatus), + proceduresMutex: &sync.Mutex{}, } return s, nil @@ -88,217 +50,41 @@ func (sc *ProcedureLauncher) GetProcedureStatuses() map[string]domain.ScrollLock return sc.procedures } -func (sc *ProcedureLauncher) LaunchPlugins() error { - go func() { - for { - select { - case item := <-sc.pluginManager.GetNotifyConsoleChannel(): - sc.logManager.AddLine(item.Stream, []byte(item.Data)) - - consoles := sc.consoleManager.GetConsoles() - //add console when stream is not found - console, ok := consoles[item.Stream] - if !ok { - console, _ = sc.consoleManager.AddConsoleWithChannel(item.Stream, domain.ConsoleTypePlugin, item.Stream, make(chan string)) - } - console.Channel.Broadcast([]byte(item.Data)) - } - } - }() - - scroll := sc.scrollService.GetFile() - - //init plugins - return sc.pluginManager.ParseFromScroll(scroll.Plugins, string(sc.scrollService.GetScrollConfigRawYaml()), sc.scrollService.GetCwd()) -} - -// I am unsure if we should support he command mode in the future as it is an antipattern for the scroll architecture, we try to solve stuff with dependencies -func (sc *ProcedureLauncher) Run(cmd string, runCommandCb func(cmd string) error) error { - +func (sc 
*ProcedureLauncher) Run(cmd string) error { command, err := sc.scrollService.GetCommand(cmd) if err != nil { sc.setProcedureStatus(cmd, domain.ScrollLockStatusError) return err } - deps := command.Dependencies - for idx, proc := range command.Procedures { - - commandIdx := fmt.Sprintf("%s.%d", cmd, idx) - - sc.setProcedureStatus(commandIdx, domain.ScrollLockStatusRunning) - - if proc.Mode == "command" { - if proc.Wait != nil { - sc.setProcedureStatus(commandIdx, domain.ScrollLockStatusError) - return errors.New("command mode does not support wait") - } - err = runCommandCb(proc.Data.(string)) - if err != nil { - sc.setProcedureStatus(commandIdx, domain.ScrollLockStatusError) - return err - } - continue - } - - if proc.Id != nil { - commandIdx = *proc.Id - } - - var err error - var exitCode *int - logger.Log().Debug("Running procedure", - zap.String("cmd", commandIdx), - zap.String("mode", proc.Mode), - zap.Any("data", proc.Data), - ) - switch wait := proc.Wait.(type) { - case int: //run in go routine and wait for x seconds - go func(procedure domain.Procedure) { - time.Sleep(time.Duration(wait) * time.Second) - sc.RunProcedure(&procedure, commandIdx, deps) - }(*proc) - case bool: //run in go routine maybe wait - if wait { - _, exitCode, err = sc.RunProcedure(proc, commandIdx, deps) - if err != nil { - sc.setProcedureStatus(commandIdx, domain.ScrollLockStatusError) - return err - } - } else { - go sc.RunProcedure(proc, commandIdx, deps) - } - default: //run and wait - _, exitCode, err = sc.RunProcedure(proc, commandIdx, deps) - if err != nil { - sc.setProcedureStatus(commandIdx, domain.ScrollLockStatusError) - return err - } - } - - if err != nil { - logger.Log().Error("Error running procedure", - zap.String("cmd", commandIdx), - zap.Error(err)) - sc.setProcedureStatus(commandIdx, domain.ScrollLockStatusError) - return err - } - - if exitCode != nil && *exitCode != 0 { - sc.setProcedureStatus(commandIdx, domain.ScrollLockStatusError) - if proc.IgnoreFailure { - 
logger.Log().Warn("Procedure failed but ignoring failure", - zap.String("cmd", commandIdx), - zap.Int("exitCode", *exitCode), - ) - continue - } - logger.Log().Error("Procedure ended with exit code "+fmt.Sprintf("%d", *exitCode), - zap.String("cmd", commandIdx), - zap.Int("exitCode", *exitCode), - ) - return fmt.Errorf("procedure %s failed with exit code %d", proc.Mode, *exitCode) - } - - if exitCode == nil { - logger.Log().Debug("Procedure ended") - } else { - logger.Log().Debug("Procedure ended with exit code 0") - } - sc.setProcedureStatus(commandIdx, domain.ScrollLockStatusDone) - } - - return nil -} - -func (sc *ProcedureLauncher) RunProcedure(proc *domain.Procedure, cmd string, dependencies []string) (string, *int, error) { - - logger.Log().Info("Running procedure", + logger.Log().Info("Running command", zap.String("cmd", cmd), - zap.String("mode", proc.Mode), - zap.Any("data", proc.Data), + zap.String("runMode", string(command.Run)), ) - processCwd := sc.scrollService.GetCwd() - //check if we have a plugin for the mode - if sc.pluginManager.HasMode(proc.Mode) { - - val, ok := proc.Data.(string) - if !ok { - return "", nil, fmt.Errorf("invalid data type for plugin mode %s, expected data to be string but go %v", proc.Mode, proc.Data) - } - - res, err := sc.pluginManager.RunProcedure(proc.Mode, val) - logger.Log().Error("Error running plugin procedure", zap.Error(err)) - return res, nil, err + dataRoot := sc.runtimeDataRoot + if dataRoot == "" { + dataRoot = sc.scrollService.GetCwd() } - - var err error - //check internal - switch proc.Mode { - //exec = create new process - case "exec-tty": - fallthrough - case "exec": - var instructions []string - instructions, err = utils.InterfaceToStringSlice(proc.Data) - if err != nil { - return "", nil, err - } - - var err error - var exitCode *int - - if sc.nixDependencyService != nil && len(dependencies) > 0 { - instructions = sc.nixDependencyService.GetCommand(instructions, dependencies) - } - - 
logger.Log().Debug("Running exec process", - zap.String("cwd", processCwd), - zap.Strings("instructions", instructions), - ) - - if proc.Mode == "exec-tty" { - exitCode, err = sc.processManager.RunTty(cmd, instructions, processCwd) - } else { - exitCode, err = sc.processManager.Run(cmd, instructions, processCwd) - } - return "", exitCode, err - case "stdin": - var instructions []string - instructions, err = utils.InterfaceToStringSlice(proc.Data) - if err != nil { - return "", nil, err - } - - if len(instructions) != 2 { - return "", nil, errors.New("invalid stdin instructions") - } - commandToWriteTo := instructions[0] - stdtIn := instructions[1] - - logger.Log().Debug("Launching stdin process", - zap.String("cwd", processCwd), - zap.Strings("instructions", instructions), - ) - - process := sc.processManager.GetRunningProcess(commandToWriteTo) - if process == nil { - return "", nil, errors.New("process not found") + sc.setProcedureStatus(cmd, domain.ScrollLockStatusRunning) + exitCode, err := sc.runtimeBackend.RunCommand(ports.RuntimeCommand{ + Name: cmd, + Command: command, + DataRoot: dataRoot, + GlobalPorts: sc.scrollService.GetFile().Ports, + }) + if err != nil { + sc.setProcedureStatus(cmd, domain.ScrollLockStatusError) + return err + } + if exitCode != nil && *exitCode != 0 { + sc.setProcedureStatus(cmd, domain.ScrollLockStatusError) + return &domain.CommandExecutionError{ + Command: cmd, + ExitCode: *exitCode, + Err: errors.New("command failed"), } - sc.processManager.WriteStdin(process, stdtIn) - - case "scroll-switch": - - logger.Log().Debug("Launching scroll-switch process", - zap.String("cwd", processCwd), - zap.String("instructions", proc.Data.(string)), - ) - - err := sc.ociRegistry.Pull(sc.scrollService.GetDir(), proc.Data.(string)) - return "", nil, err - default: - return "", nil, errors.New("Unknown mode " + proc.Mode) } - return "", nil, nil + sc.setProcedureStatus(cmd, domain.ScrollLockStatusDone) + return nil } diff --git 
a/internal/core/services/procedure_launcher_test.go b/internal/core/services/procedure_launcher_test.go new file mode 100644 index 00000000..0a177125 --- /dev/null +++ b/internal/core/services/procedure_launcher_test.go @@ -0,0 +1,52 @@ +package services_test + +import ( + "testing" + + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/ports" + "github.com/highcard-dev/daemon/internal/core/services" + mock_ports "github.com/highcard-dev/daemon/test/mock" + "go.uber.org/mock/gomock" +) + +func TestProcedureLauncherPassesCommandContextToRuntimeBackend(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) + runtimeBackend := mock_ports.NewMockRuntimeBackendInterface(ctrl) + command := &domain.CommandInstructionSet{ + Run: domain.RunModePersistent, + Procedures: []*domain.Procedure{{ + Image: "alpine:3.20", + }}, + } + file := &domain.File{Ports: []domain.Port{{Name: "http", Port: 80}}} + + scrollService.EXPECT().GetCommand("serve").Return(command, nil) + scrollService.EXPECT().GetFile().Return(file) + runtimeBackend.EXPECT().RunCommand(gomock.Any()).DoAndReturn(func(runtimeCommand ports.RuntimeCommand) (*int, error) { + if runtimeCommand.Name != "serve" { + t.Fatalf("Name = %s, want serve", runtimeCommand.Name) + } + if runtimeCommand.Command != command { + t.Fatal("Command was not forwarded to runtime backend") + } + if runtimeCommand.DataRoot != "/runtime-data" { + t.Fatalf("DataRoot = %s, want /runtime-data", runtimeCommand.DataRoot) + } + if len(runtimeCommand.GlobalPorts) != 1 || runtimeCommand.GlobalPorts[0].Name != "http" { + t.Fatalf("GlobalPorts = %#v", runtimeCommand.GlobalPorts) + } + return nil, nil + }) + + launcher, err := services.NewProcedureLauncher(scrollService, runtimeBackend, "/runtime-data") + if err != nil { + t.Fatal(err) + } + if err := launcher.Run("serve"); err != nil { + t.Fatal(err) + } +} diff --git 
a/internal/core/services/process_manager.go b/internal/core/services/process_manager.go deleted file mode 100644 index 4f2566cc..00000000 --- a/internal/core/services/process_manager.go +++ /dev/null @@ -1,309 +0,0 @@ -package services - -import ( - "bufio" - "context" - "errors" - "io" - "os" - "os/exec" - "sync" - - "github.com/creack/pty" - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/core/ports" - "github.com/highcard-dev/daemon/internal/utils/logger" - "go.uber.org/zap" -) - -type ProcessManager struct { - mu sync.Mutex - runningProcesses map[string]*domain.Process - logManager ports.LogManagerInterface - consoleManager ports.ConsoleManagerInterface - processMonitor ports.ProcessMonitorInterface -} - -func NewProcessManager(logManager ports.LogManagerInterface, consoleManager ports.ConsoleManagerInterface, processMonitor ports.ProcessMonitorInterface) *ProcessManager { - return &ProcessManager{ - runningProcesses: make(map[string]*domain.Process), - logManager: logManager, - consoleManager: consoleManager, - processMonitor: processMonitor, - } -} - -func (po *ProcessManager) RunTty(commandName string, command []string, cwd string) (*int, error) { - - process := domain.Process{ - Name: commandName, - Type: "process_tty", - } - - if process.Cmd != nil { - return nil, errors.New("process already running") - } - - name, args := command[0], command[1:] - - logger.Log().Debug("LaunchTty", - zap.String("processName", name), - zap.Strings("args", args), - zap.String("dir", cwd), - ) - - process.Cmd = exec.Command(name, args...) 
- process.Cmd.Dir = cwd - process.Cmd.Env = envWithDefaultTerm(os.Environ()) - - logger.Log().Info("Starting tty process", zap.String("commandName", commandName), zap.String("name", name), zap.Strings("args", args), zap.String("dir", cwd)) - - out, err := pty.Start(process.Cmd) - if err != nil { - return nil, err - } - - process.StdIn = out - - //self register process - po.AddRunningProcess(commandName, &process) - - //add process for monitoring - po.processMonitor.AddProcess(int32(process.Cmd.Process.Pid), commandName) - - //slight difference to normal process, as we only attach after the process has started - //add console output - - var exitCode int - - combinedChannel := make(chan string, 20) - var readWG sync.WaitGroup - readWG.Add(1) - go func() { - defer readWG.Done() - defer close(combinedChannel) - tmpBuffer := make([]byte, 1024) - for { - n, err := out.Read(tmpBuffer) - if n > 0 { - combinedChannel <- string(tmpBuffer[:n]) - } - if err != nil { - return - } - } - }() - - console, doneChan := po.consoleManager.AddConsoleWithChannel(commandName, domain.ConsoleTypeTTY, "stdin", combinedChannel) - - process.Cmd.Wait() - readWG.Wait() // drain PTY until EOF; early cancel dropped output vs. 
Wait() - - po.processMonitor.RemoveProcess(commandName) - po.RemoveProcess(commandName) - // Wait for goroutine to print everything (watchdog closes stdin) - exitCode = process.Cmd.ProcessState.ExitCode() - console.MarkExited(exitCode) - - <-doneChan - - process.Cmd = nil - - return &exitCode, nil -} - -func (po *ProcessManager) Run(commandName string, command []string, dir string) (*int, error) { - - process := domain.Process{ - Name: commandName, - Type: "process", - } - //Todo, add processmonitoring explicitly here - if process.Cmd != nil { - return nil, errors.New("process already running") - } - - cmdCtx, cmdDone := context.WithCancel(context.Background()) - - //Split command to slice - name, args := command[0], command[1:] - - logger.Log().Debug("Launch", - zap.String("commandName", commandName), - zap.String("name", name), - zap.Strings("args", args), - zap.String("dir", dir), - ) - - process.Cmd = exec.Command(name, args...) - process.Cmd.Dir = dir - - //process.Cmd.SysProcAttr = &syscall.SysProcAttr{ - // Setpgid: true, - //} - - stdoutReader, err := process.Cmd.StdoutPipe() - if err != nil { - cmdDone() - return nil, err - } - - stderrReader, err := process.Cmd.StderrPipe() - if err != nil { - cmdDone() - return nil, err - } - - stdin, err := process.Cmd.StdinPipe() - - if err != nil { - cmdDone() - return nil, err - } - - process.StdIn = stdin - - combinedChannel := make(chan string, 20) - - var wg sync.WaitGroup - - wg.Add(1) - //read stdout - go func() { - defer wg.Done() - scanner := bufio.NewScanner(stdoutReader) - for scanner.Scan() { - text := scanner.Text() - logger.Log().Debug(text) - println(text) - combinedChannel <- text + "\n" - } - }() - - wg.Add(1) - //read stderr - go func() { - defer wg.Done() - scanner := bufio.NewScanner(stderrReader) - for scanner.Scan() { - text := scanner.Text() - logger.Log().Debug(text) - println(text) - combinedChannel <- text + "\n" - } - - }() - - console, doneChan := 
po.consoleManager.AddConsoleWithChannel(commandName, domain.ConsoleTypeProcess, "stdin", combinedChannel) - - // Run and wait for Cmd to return, discard Status - err = process.Cmd.Start() - - if err != nil { - println("Error starting process", err) - cmdDone() - process.Cmd = nil - return nil, err - } - - //self register process - po.AddRunningProcess(commandName, &process) - - //add process for monitoring - po.processMonitor.AddProcess(int32(process.Cmd.Process.Pid), commandName) - - //add console output - - //WARNING MultiReader is not working as expected, it seems to block the process and process.Wait() never returns - //stdReader := io.MultiReader(stdoutReader, stderrReader) - - go func() { - wg.Wait() - - err := process.Cmd.Wait() - if err != nil { - logger.Log().Error("Error waiting for process", zap.Error(err)) - } - cmdDone() - - //stderrReader.Close() - //stdoutReader.Close() - //stdin.Close() - }() - - <-cmdCtx.Done() - - po.processMonitor.RemoveProcess(commandName) - po.RemoveProcess(commandName) - // Wait for goroutine to print everything (watchdog closes stdin) - exitCode := process.Cmd.ProcessState.ExitCode() - - console.MarkExited(exitCode) - - close(combinedChannel) - //we wait, sothat we are sure all data is written to the console - <-doneChan - - process.Cmd = nil - return &exitCode, nil -} - -func (pr *ProcessManager) WriteStdin(process *domain.Process, command string) error { - - if process.Cmd != nil { - logger.Log().Info(command, - zap.String("processName", process.Name), - ) - - if process.Type == "process_tty" { - //write as raw as possible, no need to add newline or any fancy shit - process.StdIn.Write([]byte(command)) - } else { - io.WriteString(process.StdIn, command+"\n") - } - - return nil - } - return errors.New("process not running") -} - -func (pm *ProcessManager) GetRunningProcesses() map[string]*domain.Process { - return pm.runningProcesses -} - -func (pm *ProcessManager) AddRunningProcess(commandName string, process 
*domain.Process) { - pm.mu.Lock() - defer pm.mu.Unlock() - - pm.runningProcesses[commandName] = process -} - -func (pm *ProcessManager) GetRunningProcess(commandName string) *domain.Process { - if process, ok := pm.GetRunningProcesses()[commandName]; ok { - return process - } - return nil -} - -func envWithDefaultTerm(env []string) []string { - for i, item := range env { - if len(item) < len("TERM=") || item[:len("TERM=")] != "TERM=" { - continue - } - if item == "TERM=" { - env[i] = "TERM=xterm-256color" - } else { - return env - } - return env - } - - return append(env, "TERM=xterm-256color") -} - -func (pm *ProcessManager) RemoveProcess(commandName string) { - pm.mu.Lock() - defer pm.mu.Unlock() - - delete(pm.runningProcesses, commandName) -} diff --git a/internal/core/services/process_manager_env_test.go b/internal/core/services/process_manager_env_test.go deleted file mode 100644 index bc7f870f..00000000 --- a/internal/core/services/process_manager_env_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package services - -import "testing" - -func TestEnvWithDefaultTermAddsTermWhenMissing(t *testing.T) { - env := envWithDefaultTerm([]string{"PATH=/usr/bin"}) - - if got := env[len(env)-1]; got != "TERM=xterm-256color" { - t.Fatalf("expected default TERM to be appended, got %q", got) - } -} - -func TestEnvWithDefaultTermReplacesEmptyTerm(t *testing.T) { - env := envWithDefaultTerm([]string{"TERM=", "PATH=/usr/bin"}) - - if got := env[0]; got != "TERM=xterm-256color" { - t.Fatalf("expected empty TERM to be replaced, got %q", got) - } -} - -func TestEnvWithDefaultTermPreservesExistingTerm(t *testing.T) { - env := envWithDefaultTerm([]string{"TERM=screen-256color", "PATH=/usr/bin"}) - - if got := env[0]; got != "TERM=screen-256color" { - t.Fatalf("expected existing TERM to be preserved, got %q", got) - } -} diff --git a/internal/core/services/process_manager_test.go b/internal/core/services/process_manager_test.go deleted file mode 100644 index 151c4deb..00000000 --- 
a/internal/core/services/process_manager_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package services_test - -import ( - "testing" - - "github.com/highcard-dev/daemon/internal/core/services" - mock_ports "github.com/highcard-dev/daemon/test/mock" - "go.uber.org/mock/gomock" -) - -func TestProcessManager(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - logManager := mock_ports.NewMockLogManagerInterface(ctrl) - consoleManager := services.NewConsoleManager(logManager) - processMonitor := mock_ports.NewMockProcessMonitorInterface(ctrl) - processManager := services.NewProcessManager(logManager, consoleManager, processMonitor) - t.Run("Run", func(t *testing.T) { - - processMonitor.EXPECT().AddProcess(gomock.Any(), "echo.1").Times(1) - processMonitor.EXPECT().RemoveProcess("echo.1").Times(1) - logManager.EXPECT().AddLine("echo.1", []byte("hello\n")).Times(1) - exitCode, err := processManager.Run("echo.1", []string{"echo", "hello"}, "/tmp") - - if err != nil { - t.Error(err) - } - - if *exitCode != 0 { - t.Errorf("expected 0, got %d", exitCode) - } - }) - t.Run("RunTty", func(t *testing.T) { - processMonitor.EXPECT().AddProcess(gomock.Any(), "echo.1").Times(1) - processMonitor.EXPECT().RemoveProcess("echo.1").Times(1) - - logManager.EXPECT().AddLine("echo.1", gomock.Any()).MinTimes(1) - exitCode, err := processManager.RunTty("echo.1", []string{"echo", "hello"}, "/tmp") - - if err != nil { - t.Error(err) - } - - if *exitCode != 0 { - t.Errorf("expected 0, got %d", exitCode) - } - }) -} diff --git a/internal/core/services/process_monitor.go b/internal/core/services/process_monitor.go deleted file mode 100644 index b03a4528..00000000 --- a/internal/core/services/process_monitor.go +++ /dev/null @@ -1,299 +0,0 @@ -package services - -import ( - "fmt" - "net" - "sync" - "time" - - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/utils/logger" - "github.com/prometheus/client_golang/prometheus" - 
"github.com/prometheus/client_golang/prometheus/promauto" - processutil "github.com/shirou/gopsutil/process" - "go.uber.org/zap" -) - -var ErrorProcessNotActive = fmt.Errorf("process not active") - -type ProcessMonitor struct { - exportedMetrics *ProcessMonitorMetricsExported - processes map[string]*processutil.Process - mu sync.Mutex -} - -type ProcessMonitorMetricsExported struct { - prometheusCpuUsage *prometheus.GaugeVec - prometheusMemoryUsage *prometheus.GaugeVec - prometheusConnectionCount *prometheus.GaugeVec -} - -func NewProcessMonitor(enableMetrics bool) *ProcessMonitor { - - pm := &ProcessMonitor{ - processes: make(map[string]*processutil.Process), - } - - if enableMetrics { - pm.exportedMetrics = NewProcessMonitorMetricsExported() - } - - return pm -} - -func NewProcessMonitorMetricsExported() *ProcessMonitorMetricsExported { - return &ProcessMonitorMetricsExported{ - prometheusCpuUsage: promauto.NewGaugeVec(prometheus.GaugeOpts{ - Subsystem: "druid", - Name: "cpu1", - Help: "CPU usage", - }, []string{"process"}), - prometheusMemoryUsage: promauto.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "druid", - Name: "memory", - Help: "Memory usage", - }, []string{"process"}), - prometheusConnectionCount: promauto.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "druid", - Name: "connections", - Help: "Connections", - }, []string{"process"}), - } -} - -func (po *ProcessMonitor) ShutdownPromMetrics() { - if po.exportedMetrics == nil { - - logger.Log().Warn("No metrics registered, skipping") - return - } - logger.Log().Info("Shutting down prometheus metrics") - prometheus.DefaultRegisterer.Unregister(po.exportedMetrics.prometheusCpuUsage) - prometheus.DefaultRegisterer.Unregister(po.exportedMetrics.prometheusMemoryUsage) - prometheus.DefaultRegisterer.Unregister(po.exportedMetrics.prometheusConnectionCount) -} - -func (po *ProcessMonitor) StartMonitoring() { - ticker := time.NewTicker(time.Second) - done := make(chan bool) - go func() { - for { - select { - case 
<-done: - return - case <-ticker.C: - po.RefreshMetrics() - } - } - }() -} - -func (po *ProcessMonitor) RefreshMetrics() { - po.mu.Lock() - defer po.mu.Unlock() - for name, process := range po.processes { - - _, err := po.GetProcessMetric(name, process) - if err != nil { - logger.Log().Error("Error when retrieving process Metrics", - zap.String(logger.LogKeyContext, logger.LogContextMonitor), - zap.String("processName", name), - zap.Error(err), - ) - } - } -} - -func (po *ProcessMonitor) GetProcessMetric(name string, p *processutil.Process) (*domain.ProcessMonitorMetrics, error) { - - running, err := p.IsRunning() - if err != nil { - return nil, err - } - if running { - memory, cpu, cons := calcUsageOfProcess(p, true) - - if po.exportedMetrics != nil { - po.exportedMetrics.prometheusCpuUsage.With(prometheus.Labels{"process": name}).Set(cpu) - po.exportedMetrics.prometheusMemoryUsage.With(prometheus.Labels{"process": name}).Set(float64(memory)) - po.exportedMetrics.prometheusConnectionCount.With(prometheus.Labels{"process": name}).Set(float64(len(cons))) - } - - return &domain.ProcessMonitorMetrics{ - Cpu: cpu, - Memory: memory, - Connections: cons, - Pid: int(p.Pid), - }, nil - } else { - - return nil, ErrorProcessNotActive - } -} - -func (po *ProcessMonitor) AddProcess(pid int32, name string) { - process, err := processutil.NewProcess(pid) - if err != nil { - logger.Log().Error("Error when adding process", - zap.String(logger.LogKeyContext, logger.LogContextMonitor), - zap.Int32("pid", pid), - zap.Error(err), - ) - return - } - po.mu.Lock() - defer po.mu.Unlock() - - po.processes[name] = process -} - -func (po *ProcessMonitor) RemoveProcess(name string) { - po.mu.Lock() - defer po.mu.Unlock() - - delete(po.processes, name) -} - -func calcUsageOfProcess(p *processutil.Process, excludePrivateIP bool) (int, float64, []string) { - if b, err := p.IsRunning(); !b || err != nil { - return 0, 0, []string{} - } - - memory, _ := p.MemoryInfo() - cpu1, _ := p.CPUPercent() - 
// cpu2, _ := p.CPUAffinity() - connections, _ := p.Connections() - var memoryNum int - - if memory != nil { - memoryNum = int(memory.RSS) - } else { - memoryNum = 0 - } - - children, _ := p.Children() - - var cons = []string{} - for _, con := range connections { - - if excludePrivateIP && isPrivateIP(net.ParseIP(con.Raddr.IP)) { - continue - } - if con.Raddr.IP == "" || con.Raddr.Port == 0 { - continue - } - - cons = append(cons, con.Raddr.IP+":"+fmt.Sprint(con.Raddr.Port)) - } - //recursivly fetch process tree - for _, cp := range children { - cmem, ccpu, ccons := calcUsageOfProcess(cp, true) - memoryNum += cmem - cpu1 += ccpu - cons = append(cons, ccons...) - } - - return memoryNum, cpu1, cons -} - -func (p *ProcessMonitor) GetAllProcessesMetrics() map[string]*domain.ProcessMonitorMetrics { - - metrics := make(map[string]*domain.ProcessMonitorMetrics) - - for key, process := range p.processes { - m, _ := p.GetProcessMetric(key, process) - metrics[key] = m - } - return metrics -} - -func (p *ProcessMonitor) GetPsTrees() map[string]*domain.ProcessTreeRoot { - - trees := make(map[string]*domain.ProcessTreeRoot) - - for key, process := range p.processes { - tree := GetTree(process) - trees[key] = tree - } - - return trees - -} - -var privateIPBlocks []*net.IPNet - -func isPrivateIP(ip net.IP) bool { - if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() { - return true - } - - for _, block := range privateIPBlocks { - if block.Contains(ip) { - return true - } - } - return false -} - -func init() { - for _, cidr := range []string{ - "127.0.0.0/8", // IPv4 loopback - "10.0.0.0/8", // RFC1918 - "172.16.0.0/12", // RFC1918 - "192.168.0.0/16", // RFC1918 - "169.254.0.0/16", // RFC3927 link-local - "::1/128", // IPv6 loopback - "fe80::/10", // IPv6 link-local - "fc00::/7", // IPv6 unique local addr - } { - _, block, err := net.ParseCIDR(cidr) - if err != nil { - panic(fmt.Errorf("parse error on %q: %v", cidr, err)) - } - privateIPBlocks = 
append(privateIPBlocks, block) - } -} - -func GetTree(p *processutil.Process) *domain.ProcessTreeRoot { - tree := &domain.ProcessTreeRoot{ - Root: &domain.ProcessTreeNode{}, - } - GetTreeRec(p, tree, tree.Root) - return tree -} - -func GetTreeRec(process *processutil.Process, tree *domain.ProcessTreeRoot, current *domain.ProcessTreeNode) { - current.Process = process - current.CpuPercent, _ = process.CPUPercent() - current.Memory, _ = process.MemoryInfo() - current.MemoryEx, _ = process.MemoryInfoEx() - current.IOCounters, _ = process.IOCounters() - current.Name, _ = process.Name() - current.Cmdline, _ = process.Cmdline() - current.Gids, _ = process.Gids() - current.Username, _ = process.Username() - - tree.TotalCpuPercent += current.CpuPercent - if current.Memory != nil { - tree.TotalMemoryRss += current.Memory.RSS - tree.TotalMemoryVms += current.Memory.VMS - tree.TotalMemorySwap += current.Memory.Swap - } - if current.IOCounters != nil { - tree.TotalIoCountersRead += current.IOCounters.ReadCount - tree.TotalIoCountersWrite += current.IOCounters.WriteCount - } - - var childs []*domain.ProcessTreeNode - children, err := process.Children() - if err != nil { - return - } - for _, child := range children { - childTree := &domain.ProcessTreeNode{} - GetTreeRec(child, tree, childTree) - childs = append(childs, childTree) - } - - current.Children = childs -} diff --git a/internal/core/services/queue_manager.go b/internal/core/services/queue_manager.go index 1b99199c..6f908982 100644 --- a/internal/core/services/queue_manager.go +++ b/internal/core/services/queue_manager.go @@ -1,6 +1,7 @@ package services import ( + "errors" "fmt" "sync" "time" @@ -16,37 +17,40 @@ var ErrCommandNotFound = fmt.Errorf("command not found") var ErrCommandDoneOnce = fmt.Errorf("command is already done and has run mode once") type AddItemOptions struct { - Remember bool Wait bool RunAfterExecution func() + Force bool } +type QueueStatusObserver func(command string, status 
domain.ScrollLockStatus, exitCode *int) + type QueueManager struct { - mu sync.Mutex - runQueueMu sync.Mutex - scrollService ports.ScrollServiceInterface - processLauncher ports.ProcedureLauchnerInterface - commandQueue map[string]*domain.QueueItem - taskChan chan string - taskDoneChan chan struct{} - shutdownChan chan struct{} - notifierChan []chan []string - callbacksPostRun map[string]func() + mu sync.Mutex + runQueueMu sync.Mutex + scrollService ports.ScrollServiceInterface + procedureLauncher ports.ProcedureLauchnerInterface + commandQueue map[string]*domain.QueueItem + taskChan chan string + taskDoneChan chan struct{} + shutdownChan chan struct{} + notifierChan []chan []string + callbacksPostRun map[string]func() + statusObserver QueueStatusObserver } func NewQueueManager( scrollService ports.ScrollServiceInterface, - processLauncher ports.ProcedureLauchnerInterface, + procedureLauncher ports.ProcedureLauchnerInterface, ) *QueueManager { return &QueueManager{ - scrollService: scrollService, - processLauncher: processLauncher, - commandQueue: make(map[string]*domain.QueueItem), - taskChan: make(chan string, 100), // FIXED: Buffered channel - taskDoneChan: make(chan struct{}, 1), // FIXED: Buffered channel - shutdownChan: make(chan struct{}), - notifierChan: make([]chan []string, 0), - callbacksPostRun: make(map[string]func()), + scrollService: scrollService, + procedureLauncher: procedureLauncher, + commandQueue: make(map[string]*domain.QueueItem), + taskChan: make(chan string, 100), // FIXED: Buffered channel + taskDoneChan: make(chan struct{}, 1), // FIXED: Buffered channel + shutdownChan: make(chan struct{}), + notifierChan: make([]chan []string, 0), + callbacksPostRun: make(map[string]func()), } } @@ -55,16 +59,12 @@ func (sc *QueueManager) workItem(cmd string) error { if queueItem == nil { return fmt.Errorf("command %s not found", cmd) } - changeStatus := queueItem.UpdateLockStatus logger.Log().Debug("Running command", zap.String("cmd", cmd), - 
zap.Bool("changeStatus", changeStatus), ) - return sc.processLauncher.Run(cmd, func(cmd string) error { - return sc.AddTempItem(cmd) - }) + return sc.procedureLauncher.Run(cmd) } func (sc *QueueManager) notify() { @@ -93,15 +93,15 @@ func (sc *QueueManager) notify() { } func (sc *QueueManager) AddTempItem(cmd string) error { - return sc.addQueueItem(cmd, AddItemOptions{ - Remember: false, - }) + return sc.addQueueItem(cmd, AddItemOptions{}) +} + +func (sc *QueueManager) AddForcedItem(cmd string) error { + return sc.addQueueItem(cmd, AddItemOptions{Force: true}) } func (sc *QueueManager) AddAndRememberItem(cmd string) error { - return sc.addQueueItem(cmd, AddItemOptions{ - Remember: true, - }) + return sc.addQueueItem(cmd, AddItemOptions{}) } func (sc *QueueManager) AddShutdownItem(cmd string) error { @@ -118,18 +118,26 @@ func (sc *QueueManager) AddItemWithCallback(cmd string, cb func()) error { }) } +func (sc *QueueManager) RememberDoneItem(cmd string) { + sc.mu.Lock() + defer sc.mu.Unlock() + if _, ok := sc.commandQueue[cmd]; ok { + return + } + sc.commandQueue[cmd] = &domain.QueueItem{ + Status: domain.ScrollLockStatusDone, + } +} + func (sc *QueueManager) AddTempItemWithWait(cmd string) error { return sc.addQueueItem(cmd, AddItemOptions{ - Remember: false, - Wait: true, + Wait: true, }) } func (sc *QueueManager) addQueueItem(cmd string, options AddItemOptions) error { sc.mu.Lock() - setLock := options.Remember - logger.Log().Debug("Running command", zap.String("cmd", cmd), ) @@ -141,11 +149,6 @@ func (sc *QueueManager) addQueueItem(cmd string, options AddItemOptions) error { return err } - //Functions that run once, should be remembered, but should only have waiting status, when the are called explicitly - if command.Run == domain.RunModeOnce { - setLock = true - } - if value, ok := sc.commandQueue[cmd]; ok { if value.Status != domain.ScrollLockStatusDone && value.Status != domain.ScrollLockStatusError { @@ -153,7 +156,7 @@ func (sc *QueueManager) 
addQueueItem(cmd string, options AddItemOptions) error { return ErrAlreadyInQueue } - if value.Status == domain.ScrollLockStatusDone && command.Run == domain.RunModeOnce { + if value.Status == domain.ScrollLockStatusDone && command.Run == domain.RunModeOnce && !options.Force { sc.mu.Unlock() return ErrCommandDoneOnce } @@ -165,9 +168,8 @@ func (sc *QueueManager) addQueueItem(cmd string, options AddItemOptions) error { } item := &domain.QueueItem{ - Status: domain.ScrollLockStatusWaiting, - UpdateLockStatus: setLock, - DoneChan: doneChan, + Status: domain.ScrollLockStatusWaiting, + DoneChan: doneChan, } if options.RunAfterExecution != nil { @@ -175,15 +177,7 @@ func (sc *QueueManager) addQueueItem(cmd string, options AddItemOptions) error { } sc.commandQueue[cmd] = item - - if setLock { - lock, err := sc.scrollService.GetLock() - if err != nil { - sc.mu.Unlock() - return err - } - lock.SetStatus(cmd, domain.ScrollLockStatusWaiting, nil) - } + sc.observeStatusLocked(cmd, domain.ScrollLockStatusWaiting, nil) sc.mu.Unlock() @@ -212,42 +206,36 @@ func (sc *QueueManager) RegisterCallbacks(callbacks map[string]func()) { } } -func (sc *QueueManager) QueueLockFile() error { - lock, err := sc.scrollService.GetLock() +func (sc *QueueManager) SetStatusObserver(observer QueueStatusObserver) { + sc.mu.Lock() + defer sc.mu.Unlock() + sc.statusObserver = observer +} - if err != nil { - return err - } - for cmd, status := range lock.Statuses { - //convert legacy command names +func (sc *QueueManager) HydrateCommandStatuses(statuses map[string]domain.LockStatus) error { + for cmd, status := range statuses { command, err := sc.scrollService.GetCommand(cmd) if err != nil { return err } if status.Status == domain.ScrollLockStatusDone { - //check callback if callback, ok := sc.callbacksPostRun[cmd]; ok && callback != nil { callback() } - //not sure if this can even happen for "restart", maybe on updates if command.Run != domain.RunModeRestart && command.Run != domain.RunModePersistent { 
- - //TODO: use addQueueItem here sc.mu.Lock() sc.commandQueue[cmd] = &domain.QueueItem{ - Status: domain.ScrollLockStatusDone, - UpdateLockStatus: true, + Status: domain.ScrollLockStatusDone, } sc.mu.Unlock() continue } } - status.Status = domain.ScrollLockStatusWaiting sc.addQueueItem(cmd, AddItemOptions{ - Remember: true, + RunAfterExecution: nil, }) } @@ -313,7 +301,7 @@ func (sc *QueueManager) RunQueue() { } //if done and not a restart/persistent mode, skip - isRestartMode := command.Run == domain.RunModeRestart || command.Run == domain.RunModePersistent + isRestartMode := command.Run == domain.RunModeRestart if status == domain.ScrollLockStatusDone && !isRestartMode { continue } @@ -336,8 +324,9 @@ func (sc *QueueManager) RunQueue() { } if dependenciesReady { item := sc.GetQueueItem(cmd) - //we only run one process at a time, this is not optimal, but it is simple - sc.setStatus(cmd, domain.ScrollLockStatusRunning, item.UpdateLockStatus) + runMode := command.Run + // We only run one command at a time to keep dependency resolution deterministic. 
+ sc.setStatus(cmd, domain.ScrollLockStatusRunning, nil) logger.Log().Info("Running command", zap.String("command", cmd)) go func(c string, i *domain.QueueItem) { defer func() { @@ -359,26 +348,20 @@ func (sc *QueueManager) RunQueue() { startedAt := time.Now() err := sc.workItem(c) - isRestartMode := command.Run == domain.RunModeRestart || command.Run == domain.RunModePersistent + isRestartMode := runMode == domain.RunModeRestart if err != nil { logger.Log().Error("Error running command", zap.String("command", c), zap.Error(err)) if !isRestartMode { - sc.setError(c, err, i.UpdateLockStatus) + sc.setError(c, err) return } } if isRestartMode { - // For persistent mode, mark as done (don't auto-restart on graceful shutdown) - // For restart mode, mark as waiting (will auto-restart) - if command.Run == domain.RunModePersistent { - sc.setStatus(c, domain.ScrollLockStatusDone, i.UpdateLockStatus) - } else { - // Set status to waiting immediately so shutdown captures correct state - sc.setStatus(c, domain.ScrollLockStatusWaiting, i.UpdateLockStatus) - } - + // Set status to waiting immediately so shutdown captures correct state. + sc.setStatus(c, domain.ScrollLockStatusWaiting, nil) + // Exponential backoff for fast restarts (1s, 2s, 4s, ... 
max 5m) if time.Since(startedAt) < 30*time.Second { i.RestartCount++ @@ -397,7 +380,7 @@ func (sc *QueueManager) RunQueue() { } } else { logger.Log().Info("Command done", zap.String("command", c)) - sc.setStatus(c, domain.ScrollLockStatusDone, i.UpdateLockStatus) + sc.setStatus(c, domain.ScrollLockStatusDone, nil) } }(cmd, item) @@ -416,6 +399,11 @@ func (sc *QueueManager) WaitUntilEmpty() { sc.mu.Lock() sc.notifierChan = append(sc.notifierChan, notifier) + if !sc.hasActiveItemsLocked() { + sc.removeNotifierLocked(notifier) + sc.mu.Unlock() + return + } sc.mu.Unlock() for { @@ -424,12 +412,7 @@ func (sc *QueueManager) WaitUntilEmpty() { if len(cmds) == 0 { // remove notifier sc.mu.Lock() - for i, n := range sc.notifierChan { - if n == notifier { - sc.notifierChan = append(sc.notifierChan[:i], sc.notifierChan[i+1:]...) - break - } - } + sc.removeNotifierLocked(notifier) sc.mu.Unlock() return } @@ -437,6 +420,24 @@ func (sc *QueueManager) WaitUntilEmpty() { } +func (sc *QueueManager) hasActiveItemsLocked() bool { + for _, item := range sc.commandQueue { + if item.Status != domain.ScrollLockStatusDone && item.Status != domain.ScrollLockStatusError { + return true + } + } + return false +} + +func (sc *QueueManager) removeNotifierLocked(notifier chan []string) { + for i, n := range sc.notifierChan { + if n == notifier { + sc.notifierChan = append(sc.notifierChan[:i], sc.notifierChan[i+1:]...) 
+ return + } + } +} + func (sc *QueueManager) GetQueueItem(cmd string) *domain.QueueItem { sc.mu.Lock() defer sc.mu.Unlock() @@ -457,35 +458,23 @@ func (sc *QueueManager) getStatus(cmd string) domain.ScrollLockStatus { return domain.ScrollLockStatusDone } -func (sc *QueueManager) setError(cmd string, err error, writeLock bool) { +func (sc *QueueManager) setError(cmd string, err error) { sc.mu.Lock() defer sc.mu.Unlock() if value, ok := sc.commandQueue[cmd]; ok { value.Status = domain.ScrollLockStatusError value.Error = err } - if writeLock { - lock, err := sc.scrollService.GetLock() - if err != nil { - return - } - lock.SetStatus(cmd, domain.ScrollLockStatusError, nil) - } + sc.observeStatusLocked(cmd, domain.ScrollLockStatusError, commandExitCode(err)) } -func (sc *QueueManager) setStatus(cmd string, status domain.ScrollLockStatus, writeLock bool) { +func (sc *QueueManager) setStatus(cmd string, status domain.ScrollLockStatus, exitCode *int) { sc.mu.Lock() defer sc.mu.Unlock() if value, ok := sc.commandQueue[cmd]; ok { value.Status = status } - if writeLock { - lock, err := sc.scrollService.GetLock() - if err != nil { - return - } - lock.SetStatus(cmd, status, nil) - } + sc.observeStatusLocked(cmd, status, exitCode) } func (sc *QueueManager) GetQueue() map[string]domain.ScrollLockStatus { @@ -498,3 +487,18 @@ func (sc *QueueManager) GetQueue() map[string]domain.ScrollLockStatus { } return queue } + +func (sc *QueueManager) observeStatusLocked(cmd string, status domain.ScrollLockStatus, exitCode *int) { + if sc.statusObserver == nil { + return + } + sc.statusObserver(cmd, status, exitCode) +} + +func commandExitCode(err error) *int { + var commandErr *domain.CommandExecutionError + if err != nil && errors.As(err, &commandErr) { + return &commandErr.ExitCode + } + return nil +} diff --git a/internal/core/services/queue_manager_test.go b/internal/core/services/queue_manager_test.go index 3a97458a..dc3e3149 100644 --- a/internal/core/services/queue_manager_test.go +++ 
b/internal/core/services/queue_manager_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/Masterminds/semver/v3" "github.com/highcard-dev/daemon/internal/core/domain" "github.com/highcard-dev/daemon/internal/core/services" mock_ports "github.com/highcard-dev/daemon/test/mock" @@ -53,45 +52,30 @@ func TestQueueManager(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - logManager := mock_ports.NewMockLogManagerInterface(ctrl) - processMonitor := mock_ports.NewMockProcessMonitorInterface(ctrl) - ociRegistryMock := mock_ports.NewMockOciRegistryInterface(ctrl) - pluginManager := mock_ports.NewMockPluginManagerInterface(ctrl) scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) + runtimeBackend := mock_ports.NewMockRuntimeBackendInterface(ctrl) - consoleManager := services.NewConsoleManager(logManager) - processManager := services.NewProcessManager(logManager, consoleManager, processMonitor) - procedureLauncher, err := services.NewProcedureLauncher(ociRegistryMock, processManager, pluginManager, consoleManager, logManager, scrollService, "external") + procedureLauncher, err := services.NewProcedureLauncher(scrollService, runtimeBackend, "/tmp") if err != nil { t.Error(err) } queueManager := services.NewQueueManager(scrollService, procedureLauncher) - processMonitor.EXPECT().AddProcess(gomock.Any(), "test.0").AnyTimes() - processMonitor.EXPECT().RemoveProcess("test.0").AnyTimes() + exitCode := 0 + runtimeBackend.EXPECT().RunCommand(gomock.Any()).Return(&exitCode, nil).Times(testCase.AccualExecution) scrollService.EXPECT().GetCommand("test").Return(&domain.CommandInstructionSet{ Run: testCase.RunMode, Procedures: []*domain.Procedure{ { - Mode: "exec", - Wait: nil, - Data: []interface{}{"echo", "hello"}, + Image: "alpine:3.20", + Command: []string{"echo", "hello"}, }, }, }, nil).AnyTimes() - pluginManager.EXPECT().HasMode(gomock.Any()).Return(false).AnyTimes() - - logManager.EXPECT().AddLine("test.0", 
[]byte("hello\n")).Times(testCase.AccualExecution) - - scrollService.EXPECT().GetLock().Return(&domain.ScrollLock{ - Statuses: map[string]domain.LockStatus{}, - ScrollVersion: semver.MustParse("1.0.0"), - ScrollName: "test", - }, nil).AnyTimes() - scrollService.EXPECT().GetCwd().Return("/tmp").AnyTimes() + scrollService.EXPECT().GetFile().Return(&domain.File{}).AnyTimes() go queueManager.Work() @@ -111,32 +95,21 @@ func TestQueueManager(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - processMonitor := mock_ports.NewMockProcessMonitorInterface(ctrl) scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) procedureLauncher := mock_ports.NewMockProcedureLauchnerInterface(ctrl) queueManager := services.NewQueueManager(scrollService, procedureLauncher) - processMonitor.EXPECT().AddProcess(gomock.Any(), "test").AnyTimes() - processMonitor.EXPECT().RemoveProcess("test").AnyTimes() - scrollService.EXPECT().GetCommand("test").Return(&domain.CommandInstructionSet{ Run: testCase.RunMode, Procedures: []*domain.Procedure{ { - Mode: "exec", - Wait: nil, - Data: []interface{}{"echo", "hello"}, + Image: "alpine:3.20", + Command: []string{"echo", "hello"}, }, }, }, nil).AnyTimes() - scrollService.EXPECT().GetLock().Return(&domain.ScrollLock{ - Statuses: map[string]domain.LockStatus{}, - ScrollVersion: semver.MustParse("1.0.0"), - ScrollName: "test", - }, nil).AnyTimes() - scrollService.EXPECT().GetCwd().Return("/tmp").AnyTimes() times := testCase.AccualExecution @@ -145,7 +118,7 @@ func TestQueueManager(t *testing.T) { } first := true - procedureLauncher.EXPECT().Run(gomock.Any(), gomock.Any()).DoAndReturn(func(cmd string, runCommandCb func(cmd string) error) error { + procedureLauncher.EXPECT().Run(gomock.Any()).DoAndReturn(func(cmd string) error { if first { first = false return fmt.Errorf("error") @@ -169,71 +142,6 @@ func TestQueueManager(t *testing.T) { } }) - t.Run(fmt.Sprintf("AddItem Command (RunMode: %s, Repeat: %d)", testCase.RunMode, 
testCase.Repeat), func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - logManager := mock_ports.NewMockLogManagerInterface(ctrl) - processMonitor := mock_ports.NewMockProcessMonitorInterface(ctrl) - ociRegistryMock := mock_ports.NewMockOciRegistryInterface(ctrl) - pluginManager := mock_ports.NewMockPluginManagerInterface(ctrl) - scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) - - consoleManager := services.NewConsoleManager(logManager) - processManager := services.NewProcessManager(logManager, consoleManager, processMonitor) - procedureLauncher, err := services.NewProcedureLauncher(ociRegistryMock, processManager, pluginManager, consoleManager, logManager, scrollService, "external") - if err != nil { - t.Error(err) - } - queueManager := services.NewQueueManager(scrollService, procedureLauncher) - - processMonitor.EXPECT().AddProcess(gomock.Any(), "test.0").AnyTimes() - processMonitor.EXPECT().RemoveProcess("test.0").AnyTimes() - - scrollService.EXPECT().GetCommand("test").Return(&domain.CommandInstructionSet{ - Run: testCase.RunMode, - Procedures: []*domain.Procedure{ - { - Mode: "exec", - Wait: nil, - Data: []interface{}{"echo", "hello"}, - }, - }, - }, nil).AnyTimes() - - scrollService.EXPECT().GetCommand("test_command").Return(&domain.CommandInstructionSet{ - Procedures: []*domain.Procedure{ - { - Mode: "command", - Wait: nil, - Data: "test", - }, - }, - }, nil).AnyTimes() - - pluginManager.EXPECT().HasMode(gomock.Any()).Return(false).AnyTimes() - - logManager.EXPECT().AddLine("test.0", []byte("hello\n")).Times(testCase.AccualExecution) - - scrollService.EXPECT().GetLock().Return(&domain.ScrollLock{ - Statuses: map[string]domain.LockStatus{}, - ScrollVersion: semver.MustParse("1.0.0"), - ScrollName: "test", - }, nil).AnyTimes() - - scrollService.EXPECT().GetCwd().Return("/tmp").AnyTimes() - - go queueManager.Work() - - for i := 0; i < testCase.Repeat; i++ { - err := queueManager.AddTempItem("test_command") - if err != 
nil { - t.Error(err) - } - - queueManager.WaitUntilEmpty() - } - }) } t.Run("AddItem Deep Need Structure", func(t *testing.T) { @@ -241,39 +149,24 @@ func TestQueueManager(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - logManager := mock_ports.NewMockLogManagerInterface(ctrl) - processMonitor := mock_ports.NewMockProcessMonitorInterface(ctrl) - ociRegistryMock := mock_ports.NewMockOciRegistryInterface(ctrl) - pluginManager := mock_ports.NewMockPluginManagerInterface(ctrl) scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) + runtimeBackend := mock_ports.NewMockRuntimeBackendInterface(ctrl) - consoleManager := services.NewConsoleManager(logManager) - processManager := services.NewProcessManager(logManager, consoleManager, processMonitor) - procedureLauncher, err := services.NewProcedureLauncher(ociRegistryMock, processManager, pluginManager, consoleManager, logManager, scrollService, "external") + procedureLauncher, err := services.NewProcedureLauncher(scrollService, runtimeBackend, "/tmp") if err != nil { t.Error(err) } queueManager := services.NewQueueManager(scrollService, procedureLauncher) - lock := &domain.ScrollLock{ - Statuses: map[string]domain.LockStatus{}, - } - scrollService.EXPECT().GetLock().Return(lock, nil).AnyTimes() - processMonitor.EXPECT().AddProcess(gomock.Any(), gomock.Any()).Times(4) - //processMonitor.EXPECT().AddProcess(gomock.Any(), "dep1").Times(1) - //processMonitor.EXPECT().AddProcess(gomock.Any(), "test").Times(1) - - processMonitor.EXPECT().RemoveProcess(gomock.Any()).Times(4) - //processMonitor.EXPECT().RemoveProcess("dep1").Times(1) - //processMonitor.EXPECT().RemoveProcess("test").Times(1) + exitCode := 0 + runtimeBackend.EXPECT().RunCommand(gomock.Any()).Return(&exitCode, nil).Times(4) scrollService.EXPECT().GetCommand("test").Return(&domain.CommandInstructionSet{ Needs: []string{"dep1"}, Procedures: []*domain.Procedure{ { - Mode: "exec", - Wait: nil, - Data: []interface{}{"echo", "hello"}, + 
Image: "alpine:3.20", + Command: []string{"echo", "hello"}, }, }, }, nil).AnyTimes() @@ -282,9 +175,8 @@ func TestQueueManager(t *testing.T) { Needs: []string{"dep2.1", "dep2.2"}, Procedures: []*domain.Procedure{ { - Mode: "exec", - Wait: nil, - Data: []interface{}{"echo", "hello1"}, + Image: "alpine:3.20", + Command: []string{"echo", "hello1"}, }, }, }, nil).AnyTimes() @@ -292,35 +184,22 @@ func TestQueueManager(t *testing.T) { Run: domain.RunModeOnce, Procedures: []*domain.Procedure{ { - Mode: "exec", - Wait: nil, - Data: []interface{}{"echo", "hello2.1"}, + Image: "alpine:3.20", + Command: []string{"echo", "hello2.1"}, }, }, }, nil).AnyTimes() scrollService.EXPECT().GetCommand("dep2.2").Return(&domain.CommandInstructionSet{ Procedures: []*domain.Procedure{ { - Mode: "exec", - Wait: nil, - Data: []interface{}{"echo", "hello2.2"}, + Image: "alpine:3.20", + Command: []string{"echo", "hello2.2"}, }, }, }, nil).AnyTimes() - pluginManager.EXPECT().HasMode(gomock.Any()).Return(false).AnyTimes() - - logManager.EXPECT().AddLine(gomock.Any(), gomock.Any()).Times(4) - //logManager.EXPECT().AddLine("process.dep1", gomock.Eq([]byte("hello1\n"))).Times(1) - //logManager.EXPECT().AddLine("test.0", gomock.Eq([]byte("hello\n"))).Times(1) - - scrollService.EXPECT().GetLock().Return(&domain.ScrollLock{ - Statuses: map[string]domain.LockStatus{}, - ScrollVersion: semver.MustParse("1.0.0"), - ScrollName: "test", - }, nil).AnyTimes() - scrollService.EXPECT().GetCwd().Return("/tmp").AnyTimes() + scrollService.EXPECT().GetFile().Return(&domain.File{}).AnyTimes() go queueManager.Work() err = queueManager.AddTempItem("test") @@ -330,8 +209,161 @@ func TestQueueManager(t *testing.T) { queueManager.WaitUntilEmpty() - if len(lock.Statuses) != 1 { - t.Errorf("Lock status must be 1 (dep2.1) but got %d", len(lock.Statuses)) + queue := queueManager.GetQueue() + if queue["dep2.1"] != domain.ScrollLockStatusDone { + t.Errorf("dep2.1 status must be done, got %s", queue["dep2.1"]) } }) } + +func 
TestQueueManagerStatusObserver(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) + procedureLauncher := mock_ports.NewMockProcedureLauchnerInterface(ctrl) + queueManager := services.NewQueueManager(scrollService, procedureLauncher) + + scrollService.EXPECT().GetCommand("test").Return(&domain.CommandInstructionSet{}, nil).AnyTimes() + procedureLauncher.EXPECT().Run("test").Return(nil) + + observed := []domain.ScrollLockStatus{} + queueManager.SetStatusObserver(func(command string, status domain.ScrollLockStatus, exitCode *int) { + if command == "test" { + observed = append(observed, status) + } + }) + + go queueManager.Work() + if err := queueManager.AddTempItem("test"); err != nil { + t.Fatal(err) + } + queueManager.WaitUntilEmpty() + + want := []domain.ScrollLockStatus{ + domain.ScrollLockStatusWaiting, + domain.ScrollLockStatusRunning, + domain.ScrollLockStatusDone, + } + if len(observed) != len(want) { + t.Fatalf("expected %d observed statuses, got %d: %v", len(want), len(observed), observed) + } + for i := range want { + if observed[i] != want[i] { + t.Fatalf("status %d = %s, want %s", i, observed[i], want[i]) + } + } +} + +func TestQueueManagerPersistentCommandCompletesWithoutLooping(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) + procedureLauncher := mock_ports.NewMockProcedureLauchnerInterface(ctrl) + queueManager := services.NewQueueManager(scrollService, procedureLauncher) + + scrollService.EXPECT().GetCommand("serve").Return(&domain.CommandInstructionSet{Run: domain.RunModePersistent}, nil).AnyTimes() + procedureLauncher.EXPECT().Run("serve").Return(nil).Times(1) + + go queueManager.Work() + if err := queueManager.AddTempItem("serve"); err != nil { + t.Fatal(err) + } + queueManager.WaitUntilEmpty() + + if got := queueManager.GetQueue()["serve"]; got != 
domain.ScrollLockStatusDone { + t.Fatalf("serve = %s, want done", got) + } +} + +func TestQueueManagerRememberDoneItemSatisfiesDependency(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) + procedureLauncher := mock_ports.NewMockProcedureLauchnerInterface(ctrl) + queueManager := services.NewQueueManager(scrollService, procedureLauncher) + queueManager.RememberDoneItem("verify") + + scrollService.EXPECT().GetCommand("report").Return(&domain.CommandInstructionSet{Needs: []string{"verify"}}, nil).AnyTimes() + scrollService.EXPECT().GetCommand("verify").Return(&domain.CommandInstructionSet{}, nil).AnyTimes() + procedureLauncher.EXPECT().Run("report").Return(nil) + + go queueManager.Work() + if err := queueManager.AddTempItem("report"); err != nil { + t.Fatal(err) + } + queueManager.WaitUntilEmpty() + + queue := queueManager.GetQueue() + if queue["report"] != domain.ScrollLockStatusDone { + t.Fatalf("report = %s, want done; queue=%#v", queue["report"], queue) + } + if queue["verify"] != domain.ScrollLockStatusDone { + t.Fatalf("verify = %s, want done; queue=%#v", queue["verify"], queue) + } +} + +func TestQueueManagerHydrateCommandStatuses(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) + procedureLauncher := mock_ports.NewMockProcedureLauchnerInterface(ctrl) + queueManager := services.NewQueueManager(scrollService, procedureLauncher) + + scrollService.EXPECT().GetCommand("install").Return(&domain.CommandInstructionSet{Run: domain.RunModeOnce}, nil).AnyTimes() + scrollService.EXPECT().GetCommand("start").Return(&domain.CommandInstructionSet{Run: domain.RunModeRestart}, nil).AnyTimes() + scrollService.EXPECT().GetCommand("serve").Return(&domain.CommandInstructionSet{Run: domain.RunModePersistent}, nil).AnyTimes() + 
scrollService.EXPECT().GetCommand("repair").Return(&domain.CommandInstructionSet{}, nil).AnyTimes() + + if err := queueManager.HydrateCommandStatuses(map[string]domain.LockStatus{ + "install": {Status: domain.ScrollLockStatusDone}, + "start": {Status: domain.ScrollLockStatusDone}, + "serve": {Status: domain.ScrollLockStatusDone}, + "repair": {Status: domain.ScrollLockStatusError}, + }); err != nil { + t.Fatal(err) + } + + queue := queueManager.GetQueue() + if queue["install"] != domain.ScrollLockStatusDone { + t.Fatalf("install = %s, want done", queue["install"]) + } + if queue["start"] != domain.ScrollLockStatusWaiting { + t.Fatalf("start = %s, want waiting", queue["start"]) + } + if queue["serve"] != domain.ScrollLockStatusWaiting { + t.Fatalf("serve = %s, want waiting", queue["serve"]) + } + if queue["repair"] != domain.ScrollLockStatusWaiting { + t.Fatalf("repair = %s, want waiting", queue["repair"]) + } +} + +func TestQueueManagerAddForcedItemRerunsDoneOnceCommand(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) + procedureLauncher := mock_ports.NewMockProcedureLauchnerInterface(ctrl) + queueManager := services.NewQueueManager(scrollService, procedureLauncher) + + scrollService.EXPECT().GetCommand("start").Return(&domain.CommandInstructionSet{Run: domain.RunModeOnce}, nil).AnyTimes() + procedureLauncher.EXPECT().Run("start").Return(nil).Times(2) + + go queueManager.Work() + + if err := queueManager.AddTempItem("start"); err != nil { + t.Fatal(err) + } + queueManager.WaitUntilEmpty() + if err := queueManager.AddTempItem("start"); err != services.ErrCommandDoneOnce { + t.Fatalf("AddTempItem error = %v, want ErrCommandDoneOnce", err) + } + if err := queueManager.AddForcedItem("start"); err != nil { + t.Fatal(err) + } + queueManager.WaitUntilEmpty() +} diff --git a/internal/core/services/registry/oci.go b/internal/core/services/registry/oci.go index 373cac2b..34c41f85 100644 
--- a/internal/core/services/registry/oci.go +++ b/internal/core/services/registry/oci.go @@ -44,9 +44,15 @@ type OciClient struct { func NewOciClient(credentialStore *CredentialStore) *OciClient { return &OciClient{ credentialStore: credentialStore, + plainHTTP: plainHTTPFromEnv(), } } +func plainHTTPFromEnv() bool { + value := strings.ToLower(strings.TrimSpace(os.Getenv("DRUID_REGISTRY_PLAIN_HTTP"))) + return value == "1" || value == "true" || value == "yes" +} + func (c *OciClient) GetRepo(repoUrl string) (*remote.Repository, error) { repo, err := remote.NewRepository(repoUrl) if err != nil { diff --git a/internal/core/services/registry/oci_test.go b/internal/core/services/registry/oci_test.go index 920429c2..b5ea3a1a 100644 --- a/internal/core/services/registry/oci_test.go +++ b/internal/core/services/registry/oci_test.go @@ -120,7 +120,7 @@ func fakeRegistry(t *testing.T) *httptest.Server { // in-process OCI registry. This verifies the data-chunk file paths are // resolved correctly (store-relative) and do not get doubled. // -// Regression test for: when --cwd is a relative path like +// Regression test for: when the scroll dir is a relative path like // ./scrolls/minecraft/1.17, the ORAS file store root is resolved to an // absolute path internally. 
Passing the full relative chunkFullPath // (scrolls/minecraft/1.17/data/) to fs.Add caused the store to look diff --git a/internal/core/services/runtime_scroll_manager.go b/internal/core/services/runtime_scroll_manager.go new file mode 100644 index 00000000..e5b331f3 --- /dev/null +++ b/internal/core/services/runtime_scroll_manager.go @@ -0,0 +1,249 @@ +package services + +import ( + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/ports" +) + +type RuntimeScrollManager struct { + store RuntimeScrollStore +} + +var ErrScrollAlreadyExists = errors.New("runtime scroll already exists") + +func NewRuntimeScrollManager(store RuntimeScrollStore) *RuntimeScrollManager { + return &RuntimeScrollManager{store: store} +} + +func (m *RuntimeScrollManager) Create(artifact string, requestedName string, scrollRoot string, dataRoot string, scrollYAML []byte) (*domain.RuntimeScroll, error) { + if artifact == "" { + return nil, fmt.Errorf("artifact is required") + } + if scrollRoot == "" { + return nil, fmt.Errorf("scroll root is required") + } + if dataRoot == "" { + return nil, fmt.Errorf("data root is required") + } + if len(scrollYAML) == 0 { + return nil, fmt.Errorf("scroll yaml is required") + } + scroll, err := domain.NewScrollFromBytes(scrollRoot, scrollYAML) + if err != nil { + return nil, err + } + if err := scroll.Validate(false); err != nil { + return nil, err + } + id, err := RuntimeScrollID(requestedName, scroll.Name) + if err != nil { + return nil, err + } + if _, err := m.store.GetScroll(id); err == nil { + return nil, fmt.Errorf("%w: %s", ErrScrollAlreadyExists, id) + } else if !errors.Is(err, ErrScrollNotFound) { + return nil, err + } + + runtimeScroll := &domain.RuntimeScroll{ + ID: id, + Artifact: artifact, + ScrollRoot: scrollRoot, + DataRoot: dataRoot, + ScrollName: scroll.Name, + ScrollYAML: string(scrollYAML), + Status: 
domain.RuntimeScrollStatusCreated, + Commands: map[string]domain.LockStatus{}, + } + if err := m.store.CreateScroll(runtimeScroll); err != nil { + return nil, err + } + return runtimeScroll, nil +} + +func RuntimeScrollID(requestedName string, scrollName string) (string, error) { + id := RuntimeScrollIDFromName(requestedName) + if id == "" { + id = RuntimeScrollIDFromName(scrollName) + } + if id == "" { + return "", fmt.Errorf("scroll id could not be generated") + } + return id, nil +} + +func RuntimeScrollIDFromName(name string) string { + name = strings.TrimSpace(name) + if name == "" { + return "" + } + if slash := strings.LastIndex(name, "/"); slash >= 0 { + name = name[slash+1:] + } + if at := strings.Index(name, "@"); at >= 0 { + name = name[:at] + } + if colon := strings.Index(name, ":"); colon >= 0 { + name = name[:colon] + } + name = strings.ToLower(name) + name = regexp.MustCompile(`[^a-z0-9_.-]+`).ReplaceAllString(name, "-") + name = strings.Trim(name, "-_.") + return name +} + +func MaterializeScrollArtifact(artifact string, scrollRoot string, dataRoot string, ociRegistry ports.OciRegistryInterface, includeData bool) error { + if artifact == "" { + return fmt.Errorf("artifact is required") + } + if scrollRoot == "" { + return fmt.Errorf("scroll root is required") + } + if dataRoot == "" { + return fmt.Errorf("data root is required") + } + if err := os.RemoveAll(scrollRoot); err != nil { + return err + } + if err := os.MkdirAll(scrollRoot, 0755); err != nil { + return err + } + if err := os.MkdirAll(filepath.Join(dataRoot, domain.RuntimeDataDir), 0755); err != nil { + return err + } + if localPathExists(artifact) { + if err := materializeLocalArtifact(artifact, scrollRoot); err != nil { + return err + } + return moveRuntimeData(scrollRoot, dataRoot) + } + if ociRegistry == nil { + return fmt.Errorf("OCI registry is required to pull %s", artifact) + } + if err := ociRegistry.PullSelective(scrollRoot, artifact, includeData, nil); err != nil { + return err 
+ } + if includeData { + return moveRuntimeData(scrollRoot, dataRoot) + } + return os.MkdirAll(filepath.Join(dataRoot, domain.RuntimeDataDir), 0755) +} + +func moveRuntimeData(scrollRoot string, dataRoot string) error { + src := filepath.Join(scrollRoot, domain.RuntimeDataDir) + if !localPathExists(src) { + return os.MkdirAll(filepath.Join(dataRoot, domain.RuntimeDataDir), 0755) + } + dst := filepath.Join(dataRoot, domain.RuntimeDataDir) + if err := os.RemoveAll(dst); err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + if err := os.Rename(src, dst); err == nil { + return nil + } + if err := copyDir(src, dst); err != nil { + return err + } + return os.RemoveAll(src) +} + +func MoveMaterializedScroll(srcScrollRoot string, srcDataRoot string, dstScrollRoot string, dstDataRoot string) error { + if localPathExists(dstScrollRoot) { + return fmt.Errorf("target scroll root already exists: %s", dstScrollRoot) + } + if localPathExists(dstDataRoot) { + return fmt.Errorf("target data root already exists: %s", dstDataRoot) + } + if err := os.MkdirAll(filepath.Dir(dstScrollRoot), 0755); err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(dstDataRoot), 0755); err != nil { + return err + } + if err := os.Rename(srcScrollRoot, dstScrollRoot); err != nil { + if err := copyDir(srcScrollRoot, dstScrollRoot); err != nil { + return err + } + if err := os.RemoveAll(srcScrollRoot); err != nil { + return err + } + } + if err := os.Rename(srcDataRoot, dstDataRoot); err != nil { + if err := copyDir(srcDataRoot, dstDataRoot); err != nil { + return err + } + if err := os.RemoveAll(srcDataRoot); err != nil { + return err + } + } + return nil +} + +func materializeLocalArtifact(artifact string, scrollRoot string) error { + info, err := os.Stat(artifact) + if err != nil { + return err + } + if !info.IsDir() { + if filepath.Base(artifact) != "scroll.yaml" { + return fmt.Errorf("local file artifact must be scroll.yaml") + } 
+ return copyFile(artifact, filepath.Join(scrollRoot, "scroll.yaml")) + } + return copyDir(artifact, scrollRoot) +} + +func copyDir(src string, dst string) error { + return filepath.WalkDir(src, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + rel, err := filepath.Rel(src, path) + if err != nil { + return err + } + if rel == "." { + return nil + } + target := filepath.Join(dst, rel) + if d.IsDir() { + return os.MkdirAll(target, 0755) + } + return copyFile(path, target) + }) +} + +func copyFile(src string, dst string) error { + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + out, err := os.Create(dst) + if err != nil { + return err + } + defer out.Close() + _, err = io.Copy(out, in) + return err +} + +func localPathExists(path string) bool { + _, err := os.Stat(path) + return err == nil +} diff --git a/internal/core/services/runtime_scroll_manager_test.go b/internal/core/services/runtime_scroll_manager_test.go new file mode 100644 index 00000000..6cb87a71 --- /dev/null +++ b/internal/core/services/runtime_scroll_manager_test.go @@ -0,0 +1,55 @@ +package services + +import ( + "errors" + "path/filepath" + "testing" +) + +const testScrollYAML = `name: ghcr.io/druid-examples/static-web:1.0 +desc: Static web +version: 0.1.0 +app_version: "1.0" +serve: start +commands: + start: + procedures: + - image: alpine:3.20 + command: ["true"] +` + +func TestRuntimeScrollID(t *testing.T) { + tests := []struct { + name string + scrollName string + want string + }{ + {name: "local dev", scrollName: "ignored", want: "local-dev"}, + {name: "", scrollName: "ghcr.io/druid-examples/static-web:1.0", want: "static-web"}, + {name: "", scrollName: "ghcr.io/druid-examples/static-web@sha256:abc", want: "static-web"}, + } + for _, tt := range tests { + t.Run(tt.want, func(t *testing.T) { + got, err := RuntimeScrollID(tt.name, tt.scrollName) + if 
err != nil { + t.Fatal(err) + } + if got != tt.want { + t.Fatalf("id = %q, want %q", got, tt.want) + } + }) + } +} + +func TestRuntimeScrollManagerCreateFailsDuplicateID(t *testing.T) { + store := NewRuntimeStateStore(t.TempDir()) + manager := NewRuntimeScrollManager(store) + + if _, err := manager.Create("artifact", "", t.TempDir(), filepath.Join(t.TempDir(), "data"), []byte(testScrollYAML)); err != nil { + t.Fatal(err) + } + _, err := manager.Create("artifact", "", t.TempDir(), filepath.Join(t.TempDir(), "data"), []byte(testScrollYAML)) + if !errors.Is(err, ErrScrollAlreadyExists) { + t.Fatalf("error = %v, want ErrScrollAlreadyExists", err) + } +} diff --git a/internal/core/services/runtime_state_store.go b/internal/core/services/runtime_state_store.go new file mode 100644 index 00000000..1d62e5f4 --- /dev/null +++ b/internal/core/services/runtime_state_store.go @@ -0,0 +1,338 @@ +package services + +import ( + "database/sql" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/highcard-dev/daemon/internal/core/domain" + _ "modernc.org/sqlite" +) + +var ErrScrollNotFound = errors.New("runtime scroll not found") + +type RuntimeStateStore struct { + stateDir string + dbPath string +} + +type RuntimeScrollStore interface { + StateDir() string + ScrollRoot(id string) string + DataRoot(id string) string + CreateScroll(scroll *domain.RuntimeScroll) error + ListScrolls() ([]*domain.RuntimeScroll, error) + GetScroll(id string) (*domain.RuntimeScroll, error) + UpdateScroll(scroll *domain.RuntimeScroll) error + DeleteScroll(id string) error +} + +func NewRuntimeStateStore(stateDir string) *RuntimeStateStore { + return &RuntimeStateStore{ + stateDir: stateDir, + dbPath: filepath.Join(stateDir, "state.db"), + } +} + +func (s *RuntimeStateStore) StateDir() string { + return s.stateDir +} + +func (s *RuntimeStateStore) ScrollRoot(id string) string { + return filepath.Join(s.stateDir, "scrolls", id, "spec") +} + +func (s *RuntimeStateStore) 
DataRoot(id string) string { + return filepath.Join(s.stateDir, "data", id) +} + +func (s *RuntimeStateStore) CreateScroll(scroll *domain.RuntimeScroll) error { + db, err := s.open() + if err != nil { + return err + } + defer db.Close() + + now := time.Now().UTC() + scroll.CreatedAt = now + scroll.UpdatedAt = now + if scroll.Status == "" { + scroll.Status = domain.RuntimeScrollStatusCreated + } + if scroll.Commands == nil { + scroll.Commands = map[string]domain.LockStatus{} + } + commands, err := json.Marshal(scroll.Commands) + if err != nil { + return err + } + + _, err = db.Exec(` + INSERT INTO scrolls (id, owner_id, artifact, scroll_root, data_root, scroll_name, scroll_yaml, status, created_at, updated_at, commands_json) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + `, scroll.ID, scroll.OwnerID, scroll.Artifact, scroll.ScrollRoot, scroll.DataRoot, scroll.ScrollName, scroll.ScrollYAML, scroll.Status, formatTime(scroll.CreatedAt), formatTime(scroll.UpdatedAt), string(commands)) + if err != nil { + return fmt.Errorf("create runtime scroll %s: %w", scroll.ID, err) + } + return nil +} + +func (s *RuntimeStateStore) ListScrolls() ([]*domain.RuntimeScroll, error) { + db, err := s.open() + if err != nil { + return nil, err + } + defer db.Close() + + rows, err := db.Query(` + SELECT id, owner_id, artifact, scroll_root, data_root, scroll_name, scroll_yaml, status, created_at, updated_at, commands_json + FROM scrolls + ORDER BY id + `) + if err != nil { + return nil, err + } + defer rows.Close() + + scrolls := []*domain.RuntimeScroll{} + for rows.Next() { + scroll, err := scanRuntimeScroll(rows) + if err != nil { + return nil, err + } + scrolls = append(scrolls, scroll) + } + return scrolls, rows.Err() +} + +func (s *RuntimeStateStore) GetScroll(id string) (*domain.RuntimeScroll, error) { + db, err := s.open() + if err != nil { + return nil, err + } + defer db.Close() + + row := db.QueryRow(` + SELECT id, owner_id, artifact, scroll_root, data_root, scroll_name, scroll_yaml, 
status, created_at, updated_at, commands_json + FROM scrolls + WHERE id = ? + `, id) + scroll, err := scanRuntimeScroll(row) + if errors.Is(err, sql.ErrNoRows) { + return nil, ErrScrollNotFound + } + return scroll, err +} + +func (s *RuntimeStateStore) UpdateScroll(scroll *domain.RuntimeScroll) error { + db, err := s.open() + if err != nil { + return err + } + defer db.Close() + + scroll.UpdatedAt = time.Now().UTC() + commands, err := json.Marshal(scroll.Commands) + if err != nil { + return err + } + res, err := db.Exec(` + UPDATE scrolls + SET owner_id = ?, artifact = ?, scroll_root = ?, data_root = ?, scroll_name = ?, scroll_yaml = ?, status = ?, updated_at = ?, commands_json = ? + WHERE id = ? + `, scroll.OwnerID, scroll.Artifact, scroll.ScrollRoot, scroll.DataRoot, scroll.ScrollName, scroll.ScrollYAML, scroll.Status, formatTime(scroll.UpdatedAt), string(commands), scroll.ID) + if err != nil { + return err + } + changed, err := res.RowsAffected() + if err != nil { + return err + } + if changed == 0 { + return ErrScrollNotFound + } + return nil +} + +func (s *RuntimeStateStore) DeleteScroll(id string) error { + db, err := s.open() + if err != nil { + return err + } + defer db.Close() + + res, err := db.Exec(`DELETE FROM scrolls WHERE id = ?`, id) + if err != nil { + return err + } + changed, err := res.RowsAffected() + if err != nil { + return err + } + if changed == 0 { + return ErrScrollNotFound + } + return nil +} + +func (s *RuntimeStateStore) open() (*sql.DB, error) { + if err := os.MkdirAll(s.stateDir, 0755); err != nil { + return nil, err + } + db, err := sql.Open("sqlite", s.dbPath) + if err != nil { + return nil, err + } + if _, err := db.Exec(`PRAGMA journal_mode = WAL`); err != nil { + db.Close() + return nil, err + } + if _, err := db.Exec(` + CREATE TABLE IF NOT EXISTS scrolls ( + id TEXT PRIMARY KEY, + owner_id TEXT NOT NULL DEFAULT '', + artifact TEXT NOT NULL, + scroll_root TEXT NOT NULL, + data_root TEXT NOT NULL DEFAULT '', + scroll_name TEXT 
NOT NULL, + scroll_yaml TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + commands_json TEXT NOT NULL DEFAULT '{}' + ) + `); err != nil { + db.Close() + return nil, err + } + if err := ensureColumn(db, "scrolls", "data_root", "TEXT NOT NULL DEFAULT ''"); err != nil { + db.Close() + return nil, err + } + if err := ensureColumn(db, "scrolls", "scroll_yaml", "TEXT NOT NULL DEFAULT ''"); err != nil { + db.Close() + return nil, err + } + if err := removeRuntimeColumn(db); err != nil { + db.Close() + return nil, err + } + return db, nil +} + +func removeRuntimeColumn(db *sql.DB) error { + hasRuntime, err := tableHasColumn(db, "scrolls", "runtime") + if err != nil || !hasRuntime { + return err + } + if _, err := db.Exec(` + CREATE TABLE scrolls_new ( + id TEXT PRIMARY KEY, + owner_id TEXT NOT NULL DEFAULT '', + artifact TEXT NOT NULL, + scroll_root TEXT NOT NULL, + data_root TEXT NOT NULL DEFAULT '', + scroll_name TEXT NOT NULL, + scroll_yaml TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + commands_json TEXT NOT NULL DEFAULT '{}' + ) + `); err != nil { + return err + } + if _, err := db.Exec(` + INSERT INTO scrolls_new (id, owner_id, artifact, scroll_root, data_root, scroll_name, scroll_yaml, status, created_at, updated_at, commands_json) + SELECT id, owner_id, artifact, scroll_root, data_root, scroll_name, scroll_yaml, status, created_at, updated_at, commands_json + FROM scrolls + `); err != nil { + return err + } + if _, err := db.Exec(`DROP TABLE scrolls`); err != nil { + return err + } + if _, err := db.Exec(`ALTER TABLE scrolls_new RENAME TO scrolls`); err != nil { + return err + } + return nil +} + +func ensureColumn(db *sql.DB, table string, column string, definition string) error { + exists, err := tableHasColumn(db, table, column) + if err != nil || exists { + return err + } + _, err = db.Exec(fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s", table, 
column, definition)) + return err +} + +func tableHasColumn(db *sql.DB, table string, column string) (bool, error) { + rows, err := db.Query(fmt.Sprintf("PRAGMA table_info(%s)", table)) + if err != nil { + return false, err + } + defer rows.Close() + for rows.Next() { + var cid int + var name string + var columnType string + var notNull int + var defaultValue sql.NullString + var pk int + if err := rows.Scan(&cid, &name, &columnType, ¬Null, &defaultValue, &pk); err != nil { + return false, err + } + if name == column { + return true, nil + } + } + if err := rows.Err(); err != nil { + return false, err + } + return false, nil +} + +type runtimeScrollScanner interface { + Scan(dest ...interface{}) error +} + +func scanRuntimeScroll(scanner runtimeScrollScanner) (*domain.RuntimeScroll, error) { + var scroll domain.RuntimeScroll + var status string + var createdAt string + var updatedAt string + var commandsJSON string + if err := scanner.Scan(&scroll.ID, &scroll.OwnerID, &scroll.Artifact, &scroll.ScrollRoot, &scroll.DataRoot, &scroll.ScrollName, &scroll.ScrollYAML, &status, &createdAt, &updatedAt, &commandsJSON); err != nil { + return nil, err + } + scroll.Status = domain.RuntimeScrollStatus(status) + scroll.CreatedAt = parseTime(createdAt) + scroll.UpdatedAt = parseTime(updatedAt) + if commandsJSON == "" { + commandsJSON = "{}" + } + if err := json.Unmarshal([]byte(commandsJSON), &scroll.Commands); err != nil { + return nil, err + } + if scroll.Commands == nil { + scroll.Commands = map[string]domain.LockStatus{} + } + return &scroll, nil +} + +func formatTime(t time.Time) string { + return t.UTC().Format(time.RFC3339Nano) +} + +func parseTime(value string) time.Time { + t, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return time.Time{} + } + return t +} diff --git a/internal/core/services/runtime_state_store_test.go b/internal/core/services/runtime_state_store_test.go new file mode 100644 index 00000000..a80be484 --- /dev/null +++ 
b/internal/core/services/runtime_state_store_test.go @@ -0,0 +1,135 @@ +package services_test + +import ( + "database/sql" + "path/filepath" + "testing" + + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/services" + _ "modernc.org/sqlite" +) + +func TestRuntimeStateStorePersistsCommandStatuses(t *testing.T) { + store := services.NewRuntimeStateStore(t.TempDir()) + exitCode := 2 + scroll := &domain.RuntimeScroll{ + ID: "test", + Artifact: "example", + ScrollRoot: "/tmp/spec", + DataRoot: "/tmp/data", + ScrollName: "test", + ScrollYAML: "name: test\n", + Commands: map[string]domain.LockStatus{ + "start": { + Status: domain.ScrollLockStatusRunning, + LastStatusChange: 10, + }, + }, + } + + if err := store.CreateScroll(scroll); err != nil { + t.Fatal(err) + } + + scroll.Commands["start"] = domain.LockStatus{ + Status: domain.ScrollLockStatusError, + ExitCode: &exitCode, + LastStatusChange: 20, + } + scroll.Status = domain.RuntimeScrollStatusError + if err := store.UpdateScroll(scroll); err != nil { + t.Fatal(err) + } + + got, err := store.GetScroll("test") + if err != nil { + t.Fatal(err) + } + status := got.Commands["start"] + if status.Status != domain.ScrollLockStatusError { + t.Fatalf("status = %s, want error", status.Status) + } + if status.ExitCode == nil || *status.ExitCode != exitCode { + t.Fatalf("exit code = %v, want %d", status.ExitCode, exitCode) + } + if status.LastStatusChange != 20 { + t.Fatalf("last status change = %d, want 20", status.LastStatusChange) + } + if got.ScrollYAML != "name: test\n" { + t.Fatalf("scroll yaml = %q, want cached yaml", got.ScrollYAML) + } +} + +func TestRuntimeStateStoreMigratesRuntimeColumn(t *testing.T) { + stateDir := t.TempDir() + dbPath := filepath.Join(stateDir, "state.db") + db, err := sql.Open("sqlite", dbPath) + if err != nil { + t.Fatal(err) + } + if _, err := db.Exec(` + CREATE TABLE scrolls ( + id TEXT PRIMARY KEY, + owner_id TEXT NOT NULL DEFAULT '', + artifact 
TEXT NOT NULL, + runtime TEXT NOT NULL, + scroll_root TEXT NOT NULL, + data_root TEXT NOT NULL DEFAULT '', + scroll_name TEXT NOT NULL, + scroll_yaml TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + commands_json TEXT NOT NULL DEFAULT '{}' + ) + `); err != nil { + t.Fatal(err) + } + if _, err := db.Exec(` + INSERT INTO scrolls (id, owner_id, artifact, runtime, scroll_root, data_root, scroll_name, scroll_yaml, status, created_at, updated_at, commands_json) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + `, "legacy", "", "example", "docker", "/tmp/spec", "/tmp/data", "legacy", "name: legacy\n", "stopped", "2026-01-01T00:00:00Z", "2026-01-01T00:00:00Z", "{}"); err != nil { + t.Fatal(err) + } + if err := db.Close(); err != nil { + t.Fatal(err) + } + + store := services.NewRuntimeStateStore(stateDir) + got, err := store.GetScroll("legacy") + if err != nil { + t.Fatal(err) + } + if got.ID != "legacy" || got.Artifact != "example" || got.ScrollYAML != "name: legacy\n" { + t.Fatalf("migrated scroll = %#v", got) + } + + db, err = sql.Open("sqlite", dbPath) + if err != nil { + t.Fatal(err) + } + defer db.Close() + rows, err := db.Query(`PRAGMA table_info(scrolls)`) + if err != nil { + t.Fatal(err) + } + defer rows.Close() + for rows.Next() { + var cid int + var name string + var columnType string + var notNull int + var defaultValue sql.NullString + var pk int + if err := rows.Scan(&cid, &name, &columnType, ¬Null, &defaultValue, &pk); err != nil { + t.Fatal(err) + } + if name == "runtime" { + t.Fatal("runtime column should be removed during migration") + } + } + if err := rows.Err(); err != nil { + t.Fatal(err) + } +} diff --git a/internal/core/services/scroll_service.go b/internal/core/services/scroll_service.go index bfab534c..f22fc1ad 100644 --- a/internal/core/services/scroll_service.go +++ b/internal/core/services/scroll_service.go @@ -3,31 +3,21 @@ package services import ( "errors" "os" - "path/filepath" - 
"regexp" "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/core/ports" "github.com/highcard-dev/daemon/internal/utils" - "gopkg.in/yaml.v2" ) type ScrollService struct { - scrollDir string - scroll *domain.Scroll - lock *domain.ScrollLock - templateRenderer ports.TemplateRendererInterface -} -type TemplateData struct { - Config interface{} + scrollDir string + scroll *domain.Scroll } func NewScrollService( - processCwd string, + scrollDir string, ) (*ScrollService, error) { s := &ScrollService{ - scrollDir: processCwd, - templateRenderer: NewTemplateRenderer(), + scrollDir: scrollDir, } _, err := s.ReloadScroll() @@ -35,9 +25,22 @@ func NewScrollService( return s, err } +func NewCachedScrollService(scrollDir string, scrollYAML []byte) (*ScrollService, error) { + s := &ScrollService{ + scrollDir: scrollDir, + } + scroll, err := domain.NewScrollFromBytes(scrollDir, scrollYAML) + if err != nil { + return nil, err + } + if err := scroll.Validate(false); err != nil { + return nil, err + } + s.scroll = scroll + return s, nil +} + func (sc *ScrollService) ReloadScroll() (*domain.Scroll, error) { - // TODO: better templating for scrolls in next version or so - os.Setenv("SCROLL_DIR", sc.GetDir()) scroll, err := domain.NewScroll(sc.GetDir()) if err != nil { @@ -55,56 +58,6 @@ func (sc *ScrollService) ReloadScroll() (*domain.Scroll, error) { return scroll, nil } -// Load Scroll and render templates in the cwd -func (sc *ScrollService) ReloadLock(ignoreVersionCheck bool) (*domain.ScrollLock, error) { - - var scroll = sc.scroll - - lock := sc.ReadLock() - - sc.lock = lock - - //Update the lock with the current scroll version - if lock.ScrollVersion == nil { - lock.ScrollVersion = scroll.Version - lock.ScrollName = scroll.Name - lock.Write() - } else { - if !lock.ScrollVersion.Equal(sc.scroll.Version) && !ignoreVersionCheck { - return lock, errors.New("scroll version mismatch") - } - } - - return lock, nil - -} - -func (sc 
*ScrollService) LockExists() bool { - exisits, err := utils.FileExists(sc.GetDir() + "/scroll-lock.json") - return err == nil && exisits -} - -func (sc *ScrollService) ReadLock() *domain.ScrollLock { - lock, err := domain.ReadLock(sc.GetDir() + "/scroll-lock.json") - - if err != nil { - return sc.WriteNewScrollLock() - } - return lock -} - -func (sc *ScrollService) GetLock() (*domain.ScrollLock, error) { - if sc.lock != nil { - return sc.lock, nil - } - - return nil, errors.New("lock not found") -} - -func (sc *ScrollService) WriteNewScrollLock() *domain.ScrollLock { - return domain.WriteNewScrollLock(sc.GetDir() + "/scroll-lock.json") -} - func (sc *ScrollService) GetDir() string { return sc.scrollDir } @@ -127,91 +80,6 @@ func (s ScrollService) ScrollExists() bool { return b && err == nil } -func isScrollConfigTemplate(path string) bool { - return filepath.Base(path) == domain.ScrollConfigTemplate -} - -// ensureScrollConfigFromTemplate renders scroll-config.yml.scroll_template to -// produce scroll-config.yml when the config file does not yet exist. This is a -// one-shot bootstrap: once the file is present it is never overwritten, so -// user edits and non-deterministic template output (e.g. randAlphaNum) are -// preserved across restarts. 
-func (s ScrollService) ensureScrollConfigFromTemplate() error { - configPath := filepath.Join(s.scrollDir, domain.ScrollConfigFile) - if exists, _ := utils.FileExists(configPath); exists { - return nil - } - - templatePath := filepath.Join(s.scrollDir, domain.ScrollConfigTemplate) - if ok, _ := utils.FileExists(templatePath); !ok { - return nil - } - - config := TemplateData{} - return s.templateRenderer.RenderScrollTemplateFiles("", []string{templatePath}, config, "") -} - -func (s ScrollService) RenderCwdTemplates() error { - if err := s.ensureScrollConfigFromTemplate(); err != nil { - return err - } - - cwd := s.scrollDir - - libRegEx, err := regexp.Compile(`^.+\.(scroll_template)$`) - if err != nil { - return err - } - - files := []string{} - filepath.Walk(cwd, func(path string, info os.FileInfo, err error) error { - if !libRegEx.MatchString(path) { - return nil - } - if isScrollConfigTemplate(path) { - return nil - } - files = append(files, path) - return nil - }) - - if len(files) == 0 { - return nil - } - - config := TemplateData{Config: s.GetScrollConfig()} - - return s.templateRenderer.RenderScrollTemplateFiles("", files, config, "") -} - -func (s ScrollService) GetScrollConfig() interface{} { - - var data interface{} - - content := s.GetScrollConfigRawYaml() - - if len(content) == 0 { - return data - } - - // Unmarshal the YAML data into the struct - yaml.Unmarshal(content, &data) - - return data -} - -func (s ScrollService) GetScrollConfigRawYaml() []byte { - path := filepath.Join(s.scrollDir, domain.ScrollConfigFile) - - content, err := os.ReadFile(path) - - if err != nil { - return []byte{} - } - - return content -} - func (sc *ScrollService) GetCommand(cmd string) (*domain.CommandInstructionSet, error) { scroll := sc.GetFile() //check if we can accually do it before we start @@ -221,11 +89,3 @@ func (sc *ScrollService) GetCommand(cmd string) (*domain.CommandInstructionSet, return nil, errors.New("command " + cmd + " not found") } } - -func (sc 
*ScrollService) AddTemporaryCommand(cmd string, instructions *domain.CommandInstructionSet) { - scroll := sc.GetFile() - if scroll.Commands == nil { - scroll.Commands = make(map[string]*domain.CommandInstructionSet) - } - scroll.Commands[cmd] = instructions -} diff --git a/internal/core/services/scroll_service_test.go b/internal/core/services/scroll_service_test.go deleted file mode 100644 index 4d81c57c..00000000 --- a/internal/core/services/scroll_service_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package services_test - -import ( - "os" - "path/filepath" - "testing" - - "github.com/highcard-dev/daemon/internal/core/services" -) - -const minimalScrollYaml = `name: test-scroll -version: 0.0.1 -commands: {} -` - -func writeFile(t *testing.T, path, content string) { - t.Helper() - if err := os.WriteFile(path, []byte(content), 0644); err != nil { - t.Fatalf("failed to write %s: %v", path, err) - } -} - -func TestRenderCwdTemplates_BootstrapsScrollConfig(t *testing.T) { - dir := t.TempDir() - writeFile(t, filepath.Join(dir, "scroll.yaml"), minimalScrollYaml) - writeFile(t, filepath.Join(dir, "scroll-config.yml.scroll_template"), "key: generated-value\n") - - svc, err := services.NewScrollService(dir) - if err != nil { - t.Fatalf("NewScrollService: %v", err) - } - - if err := svc.RenderCwdTemplates(); err != nil { - t.Fatalf("RenderCwdTemplates: %v", err) - } - - content, err := os.ReadFile(filepath.Join(dir, "scroll-config.yml")) - if err != nil { - t.Fatalf("scroll-config.yml should exist after bootstrap: %v", err) - } - if string(content) != "key: generated-value\n" { - t.Errorf("unexpected config content: %q", string(content)) - } -} - -func TestRenderCwdTemplates_DoesNotOverwriteExistingConfig(t *testing.T) { - dir := t.TempDir() - writeFile(t, filepath.Join(dir, "scroll.yaml"), minimalScrollYaml) - writeFile(t, filepath.Join(dir, "scroll-config.yml"), "key: user-edited\n") - writeFile(t, filepath.Join(dir, "scroll-config.yml.scroll_template"), "key: {{ randAlphaNum 
50 }}\n") - - svc, err := services.NewScrollService(dir) - if err != nil { - t.Fatalf("NewScrollService: %v", err) - } - - if err := svc.RenderCwdTemplates(); err != nil { - t.Fatalf("RenderCwdTemplates: %v", err) - } - - content, err := os.ReadFile(filepath.Join(dir, "scroll-config.yml")) - if err != nil { - t.Fatalf("reading config: %v", err) - } - if string(content) != "key: user-edited\n" { - t.Errorf("existing config was overwritten: %q", string(content)) - } -} - -func TestRenderCwdTemplates_OtherTemplatesReceiveBootstrappedConfig(t *testing.T) { - dir := t.TempDir() - writeFile(t, filepath.Join(dir, "scroll.yaml"), minimalScrollYaml) - writeFile(t, filepath.Join(dir, "scroll-config.yml.scroll_template"), "greeting: hello\n") - writeFile(t, filepath.Join(dir, "app.conf.scroll_template"), "value: {{ .Config.greeting }}\n") - - svc, err := services.NewScrollService(dir) - if err != nil { - t.Fatalf("NewScrollService: %v", err) - } - - if err := svc.RenderCwdTemplates(); err != nil { - t.Fatalf("RenderCwdTemplates: %v", err) - } - - content, err := os.ReadFile(filepath.Join(dir, "app.conf")) - if err != nil { - t.Fatalf("app.conf should exist: %v", err) - } - if string(content) != "value: hello\n" { - t.Errorf("other template did not see bootstrapped config: %q", string(content)) - } -} diff --git a/internal/core/services/template_renderer.go b/internal/core/services/template_renderer.go deleted file mode 100644 index 6a124064..00000000 --- a/internal/core/services/template_renderer.go +++ /dev/null @@ -1,72 +0,0 @@ -package services - -import ( - "bytes" - "html/template" - "os" - "path" - "path/filepath" - "strings" - - "github.com/Masterminds/sprig" -) - -type TemplateRenderer struct{} - -func NewTemplateRenderer() *TemplateRenderer { - return &TemplateRenderer{} -} - -func (tr *TemplateRenderer) RenderTemplate(templatePath string, data interface{}) (string, error) { - tmpl, err := template.New("scroll_template").Funcs(sprig.TxtFuncMap()).Parse(templatePath) 
- if err != nil { - return "", err - } - - var tpl bytes.Buffer - err = tmpl.Execute(&tpl, data) - - if err != nil { - return "", err - } - - return tpl.String(), err -} - -func (tr *TemplateRenderer) RenderScrollTemplateFiles(templateBase string, templateFiles []string, data any, outputDir string) error { - for _, templateFile := range templateFiles { - tpl := template.New("scroll_template").Funcs(sprig.TxtFuncMap()) - // Parse the template files - templates, err := tpl.ParseFiles(path.Join(templateBase, templateFile)) - if err != nil { - return err - } - // Remove the "template" suffix from the file name - outputFileName := strings.TrimSuffix(templateFile, ".scroll_template") - - if outputDir != "" { - // Prepend the output directory if specified - outputFileName = filepath.Join(outputDir, outputFileName) - } - - //ensure the output directory exists - outputDirPath := filepath.Dir(outputFileName) - if err := os.MkdirAll(outputDirPath, os.ModePerm); err != nil { - return err - } - - // Create a new file for the rendered output - outputFile, err := os.Create(outputFileName) - if err != nil { - return err - } - defer outputFile.Close() - - // Execute the template and write the output to the file - err = templates.Funcs(sprig.FuncMap()).ExecuteTemplate(outputFile, filepath.Base(templateFile), data) - if err != nil { - return err - } - } - return nil -} diff --git a/internal/handler/annotation_handler.go b/internal/handler/annotation_handler.go deleted file mode 100644 index f4b06b8c..00000000 --- a/internal/handler/annotation_handler.go +++ /dev/null @@ -1,21 +0,0 @@ -package handler - -import ( - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/core/ports" -) - -type AnnotationHandler struct { - scrollService ports.ScrollServiceInterface -} - -func NewAnnotationHandler(scrollService ports.ScrollServiceInterface) *AnnotationHandler { - return &AnnotationHandler{ - scrollService: scrollService, - } -} - -func (ah AnnotationHandler) 
Annotations(c *fiber.Ctx) error { - annotationsFile := ah.scrollService.GetDir() + "/annotations.json" - return c.SendFile(annotationsFile) -} diff --git a/internal/handler/annotation_handler_test.go b/internal/handler/annotation_handler_test.go deleted file mode 100644 index 0f3621a6..00000000 --- a/internal/handler/annotation_handler_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package handler - -import ( - "net/http/httptest" - "os" - "path/filepath" - "testing" - - "github.com/gofiber/fiber/v2" - mock_ports "github.com/highcard-dev/daemon/test/mock" - "go.uber.org/mock/gomock" -) - -// AnnotationTestContext holds all mocked services for annotation handler testing -type AnnotationTestContext struct { - App *fiber.App - Ctrl *gomock.Controller - ScrollService *mock_ports.MockScrollServiceInterface - Handler *AnnotationHandler -} - -// setupAnnotationTestApp creates a Fiber app with mocked dependencies for testing -func setupAnnotationTestApp(t *testing.T) *AnnotationTestContext { - ctrl := gomock.NewController(t) - - scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) - handler := NewAnnotationHandler(scrollService) - - app := fiber.New() - app.Get("/annotations", handler.Annotations) - - return &AnnotationTestContext{ - App: app, - Ctrl: ctrl, - ScrollService: scrollService, - Handler: handler, - } -} - -func TestAnnotationHandler_Annotations_Success(t *testing.T) { - tc := setupAnnotationTestApp(t) - defer tc.Ctrl.Finish() - - // Create a temporary directory and file for testing - tempDir, err := os.MkdirTemp("", "annotation-test") - if err != nil { - t.Fatalf("Failed to create temp dir: %v", err) - } - defer os.RemoveAll(tempDir) - - // Create annotations.json file - annotationsFile := filepath.Join(tempDir, "annotations.json") - annotationsContent := `{"key": "value"}` - if err := os.WriteFile(annotationsFile, []byte(annotationsContent), 0644); err != nil { - t.Fatalf("Failed to create annotations file: %v", err) - } - - 
tc.ScrollService.EXPECT().GetDir().Return(tempDir) - - req := httptest.NewRequest("GET", "/annotations", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } -} - -func TestAnnotationHandler_Annotations_FileNotFound(t *testing.T) { - tc := setupAnnotationTestApp(t) - defer tc.Ctrl.Finish() - - // Return a directory that doesn't exist - tc.ScrollService.EXPECT().GetDir().Return("/non/existent/path") - - req := httptest.NewRequest("GET", "/annotations", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Fiber returns 404 when file is not found - if resp.StatusCode != 404 { - t.Errorf("Expected status 404, got %d", resp.StatusCode) - } -} - -func TestAnnotationHandler_Annotations_EmptyDir(t *testing.T) { - tc := setupAnnotationTestApp(t) - defer tc.Ctrl.Finish() - - // Create temp dir without annotations file - tempDir, err := os.MkdirTemp("", "annotation-test-empty") - if err != nil { - t.Fatalf("Failed to create temp dir: %v", err) - } - defer os.RemoveAll(tempDir) - - tc.ScrollService.EXPECT().GetDir().Return(tempDir) - - req := httptest.NewRequest("GET", "/annotations", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Should return 404 when file doesn't exist - if resp.StatusCode != 404 { - t.Errorf("Expected status 404, got %d", resp.StatusCode) - } -} - -func TestAnnotationHandler_Annotations_ValidJSON(t *testing.T) { - tc := setupAnnotationTestApp(t) - defer tc.Ctrl.Finish() - - tempDir, err := os.MkdirTemp("", "annotation-test-json") - if err != nil { - t.Fatalf("Failed to create temp dir: %v", err) - } - defer os.RemoveAll(tempDir) - - // Create a valid JSON annotations file - annotationsFile := 
filepath.Join(tempDir, "annotations.json") - annotationsContent := `{ - "annotations": [ - {"name": "cpu", "value": "50%"}, - {"name": "memory", "value": "1GB"} - ] - }` - if err := os.WriteFile(annotationsFile, []byte(annotationsContent), 0644); err != nil { - t.Fatalf("Failed to create annotations file: %v", err) - } - - tc.ScrollService.EXPECT().GetDir().Return(tempDir) - - req := httptest.NewRequest("GET", "/annotations", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - // Check content type - contentType := resp.Header.Get("Content-Type") - if contentType != "application/json" { - // Note: Fiber sets content type based on file extension - t.Logf("Content-Type: %s", contentType) - } -} diff --git a/internal/handler/coldstarter_handler.go b/internal/handler/coldstarter_handler.go deleted file mode 100644 index 51d2b38a..00000000 --- a/internal/handler/coldstarter_handler.go +++ /dev/null @@ -1,22 +0,0 @@ -package handler - -import ( - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/core/ports" -) - -type ColdstarterHandler struct { - coldstarter ports.ColdStarterInterface -} - -func NewColdstarterHandler(coldstarter ports.ColdStarterInterface) *ColdstarterHandler { - return &ColdstarterHandler{ - coldstarter: coldstarter, - } -} - -func (ah ColdstarterHandler) FinishColdstarter(c *fiber.Ctx) error { - ah.coldstarter.Finish(nil) - c.Status(202) - return nil -} diff --git a/internal/handler/coldstarter_handler_test.go b/internal/handler/coldstarter_handler_test.go deleted file mode 100644 index fd078ed9..00000000 --- a/internal/handler/coldstarter_handler_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package handler - -import ( - "net/http/httptest" - "testing" - - "github.com/gofiber/fiber/v2" - mock_ports "github.com/highcard-dev/daemon/test/mock" - 
"go.uber.org/mock/gomock" -) - -// ColdstarterTestContext holds all mocked services for coldstarter handler testing -type ColdstarterTestContext struct { - App *fiber.App - Ctrl *gomock.Controller - Coldstarter *mock_ports.MockColdStarterInterface - Handler *ColdstarterHandler -} - -// setupColdstarterTestApp creates a Fiber app with mocked dependencies for testing -func setupColdstarterTestApp(t *testing.T) *ColdstarterTestContext { - ctrl := gomock.NewController(t) - - coldstarter := mock_ports.NewMockColdStarterInterface(ctrl) - handler := NewColdstarterHandler(coldstarter) - - app := fiber.New() - app.Post("/api/v1/coldstarter/finish", handler.FinishColdstarter) - - return &ColdstarterTestContext{ - App: app, - Ctrl: ctrl, - Coldstarter: coldstarter, - Handler: handler, - } -} - -func TestColdstarterHandler_Finish_Success(t *testing.T) { - tc := setupColdstarterTestApp(t) - defer tc.Ctrl.Finish() - - // Finish is called with nil argument - tc.Coldstarter.EXPECT().Finish(nil) - - req := httptest.NewRequest("POST", "/api/v1/coldstarter/finish", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 202 { - t.Errorf("Expected status 202, got %d", resp.StatusCode) - } -} - -func TestColdstarterHandler_Finish_CalledOnce(t *testing.T) { - tc := setupColdstarterTestApp(t) - defer tc.Ctrl.Finish() - - // Verify Finish is called exactly once - tc.Coldstarter.EXPECT().Finish(nil).Times(1) - - req := httptest.NewRequest("POST", "/api/v1/coldstarter/finish", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 202 { - t.Errorf("Expected status 202, got %d", resp.StatusCode) - } -} - -func TestColdstarterHandler_Finish_WithBody(t *testing.T) { - tc := setupColdstarterTestApp(t) - defer tc.Ctrl.Finish() - - // Handler ignores request body, still calls Finish with nil 
- tc.Coldstarter.EXPECT().Finish(nil) - - req := httptest.NewRequest("POST", "/api/v1/coldstarter/finish", nil) - req.Header.Set("Content-Type", "application/json") - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 202 { - t.Errorf("Expected status 202, got %d", resp.StatusCode) - } -} diff --git a/internal/handler/daemon_handler.go b/internal/handler/daemon_handler.go deleted file mode 100644 index c8e406b1..00000000 --- a/internal/handler/daemon_handler.go +++ /dev/null @@ -1,22 +0,0 @@ -package handler - -import ( - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/signals" -) - -type DaemonHandler struct { - shutdown *signals.SignalHandler -} - -func NewDaemonHandler(shutdown *signals.SignalHandler) *DaemonHandler { - return &DaemonHandler{ - shutdown: shutdown, - } -} - -func (ah DaemonHandler) StopDaemon(c *fiber.Ctx) error { - ah.shutdown.Stop() - c.Status(201) - return nil -} diff --git a/internal/handler/health_handler.go b/internal/handler/health_handler.go deleted file mode 100644 index 36e116ec..00000000 --- a/internal/handler/health_handler.go +++ /dev/null @@ -1,77 +0,0 @@ -package handler - -import ( - "time" - - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/api" - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/core/ports" -) - -type HealthHandler struct { - portService ports.PortServiceInterface - timeoutDone bool - Started *time.Time - progress *domain.SnapshotProgress -} - -func NewHealthHandler( - portService ports.PortServiceInterface, - timeoutSec uint, - progress *domain.SnapshotProgress, -) *HealthHandler { - - h := &HealthHandler{ - portService: portService, - timeoutDone: false, - Started: nil, - progress: progress, - } - - // if timeoutSec == 0, we want at some point to not show a bad health status - if timeoutSec != 0 { - timeout := 
time.NewTimer(time.Duration(timeoutSec) * time.Second) - go h.countdown(timeout) - } - - return h -} - -func (p *HealthHandler) GetHealthAuth(c *fiber.Ctx) error { - - if p.progress != nil { - if mode, ok := p.progress.Mode.Load().(string); ok && mode == "restore" { - pct := float32(p.progress.Percentage.Load()) - return c.JSON(api.HealthResponse{ - Mode: "restore", - Progress: &pct, - }) - } - } - - portsOpen := p.portService.MandatoryPortsOpen() - - if !p.timeoutDone && !portsOpen { - c.SendStatus(503) - return c.JSON(api.HealthResponse{ - Mode: "manditory_ports", - }) - - } - if p.Started == nil { - return c.JSON(api.HealthResponse{ - Mode: "idle", - }) - } - - return c.JSON(api.HealthResponse{ - Mode: "ok", - StartDate: p.Started, - }) -} - -func (p *HealthHandler) countdown(timeout *time.Timer) { - <-timeout.C - p.timeoutDone = true -} diff --git a/internal/handler/health_handler_test.go b/internal/handler/health_handler_test.go deleted file mode 100644 index 3db683f9..00000000 --- a/internal/handler/health_handler_test.go +++ /dev/null @@ -1,197 +0,0 @@ -package handler - -import ( - "encoding/json" - "io" - "net/http/httptest" - "testing" - "time" - - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/api" - "github.com/highcard-dev/daemon/internal/core/domain" - mock_ports "github.com/highcard-dev/daemon/test/mock" - "go.uber.org/mock/gomock" -) - -// HealthTestContext holds all mocked services for health handler testing -type HealthTestContext struct { - App *fiber.App - Ctrl *gomock.Controller - PortService *mock_ports.MockPortServiceInterface - Handler *HealthHandler -} - -// setupHealthTestApp creates a Fiber app with mocked dependencies for testing -func setupHealthTestApp(t *testing.T, timeoutSec uint) *HealthTestContext { - return setupHealthTestAppWithProgress(t, timeoutSec, nil) -} - -func setupHealthTestAppWithProgress(t *testing.T, timeoutSec uint, progress *domain.SnapshotProgress) *HealthTestContext { - ctrl := 
gomock.NewController(t) - - portService := mock_ports.NewMockPortServiceInterface(ctrl) - - handler := NewHealthHandler(portService, timeoutSec, progress) - - app := fiber.New() - app.Get("/api/v1/health", handler.GetHealthAuth) - - return &HealthTestContext{ - App: app, - Ctrl: ctrl, - PortService: portService, - Handler: handler, - } -} - -func TestHealthHandler_Health_MandatoryPortsNotOpen(t *testing.T) { - tc := setupHealthTestApp(t, 0) // No timeout - defer tc.Ctrl.Finish() - - // Ports not open and timeout not done - tc.PortService.EXPECT().MandatoryPortsOpen().Return(false) - - req := httptest.NewRequest("GET", "/api/v1/health", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 503 { - t.Errorf("Expected status 503, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result api.HealthResponse - json.Unmarshal(body, &result) - - if result.Mode != "manditory_ports" { - t.Errorf("Expected mode 'manditory_ports', got '%s'", result.Mode) - } -} - -func TestHealthHandler_Health_Idle(t *testing.T) { - tc := setupHealthTestApp(t, 0) - defer tc.Ctrl.Finish() - - // Ports open, but Started is nil - tc.PortService.EXPECT().MandatoryPortsOpen().Return(true) - - req := httptest.NewRequest("GET", "/api/v1/health", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result api.HealthResponse - json.Unmarshal(body, &result) - - if result.Mode != "idle" { - t.Errorf("Expected mode 'idle', got '%s'", result.Mode) - } -} - -func TestHealthHandler_Health_Ok(t *testing.T) { - tc := setupHealthTestApp(t, 0) - defer tc.Ctrl.Finish() - - now := time.Now() - tc.Handler.Started = &now - - 
tc.PortService.EXPECT().MandatoryPortsOpen().Return(true) - - req := httptest.NewRequest("GET", "/api/v1/health", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result api.HealthResponse - json.Unmarshal(body, &result) - - if result.Mode != "ok" { - t.Errorf("Expected mode 'ok', got '%s'", result.Mode) - } - if result.StartDate == nil { - t.Error("Expected StartDate to be set") - } -} - -func TestHealthHandler_Health_TimeoutDone_PortsClosed(t *testing.T) { - tc := setupHealthTestApp(t, 0) - defer tc.Ctrl.Finish() - - // Manually set timeoutDone to true - tc.Handler.timeoutDone = true - - // Even with ports closed, if timeout is done, we proceed - tc.PortService.EXPECT().MandatoryPortsOpen().Return(false) - - req := httptest.NewRequest("GET", "/api/v1/health", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Should return idle since Started is nil - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result api.HealthResponse - json.Unmarshal(body, &result) - - if result.Mode != "idle" { - t.Errorf("Expected mode 'idle', got '%s'", result.Mode) - } -} - -func TestHealthHandler_Health_Restore(t *testing.T) { - progress := domain.NewSnapshotProgress() - progress.Mode.Store("restore") - progress.Percentage.Store(42) - - tc := setupHealthTestAppWithProgress(t, 0, progress) - defer tc.Ctrl.Finish() - - req := httptest.NewRequest("GET", "/api/v1/health", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - 
- body, _ := io.ReadAll(resp.Body) - var result api.HealthResponse - json.Unmarshal(body, &result) - - if result.Mode != "restore" { - t.Errorf("Expected mode 'restore', got '%s'", result.Mode) - } - if result.Progress == nil { - t.Fatal("Expected Progress to be set") - } - if *result.Progress != 42 { - t.Errorf("Expected progress 42, got %f", *result.Progress) - } -} diff --git a/internal/handler/port_handler.go b/internal/handler/port_handler.go deleted file mode 100644 index 570ed37b..00000000 --- a/internal/handler/port_handler.go +++ /dev/null @@ -1,68 +0,0 @@ -package handler - -import ( - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/api" - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/core/ports" - "github.com/highcard-dev/daemon/internal/utils" -) - -type PortHandler struct { - portService ports.PortServiceInterface -} - -func NewPortHandler( - portService ports.PortServiceInterface, -) *PortHandler { - return &PortHandler{ - portService, - } -} - -func (p PortHandler) GetPorts(c *fiber.Ctx) error { - augmentedPorts := p.portService.GetPorts() - - return c.JSON(augmentedPorts) -} - -func (p PortHandler) AddPort(c *fiber.Ctx) error { - var req api.AddPortRequest - if err := c.BodyParser(&req); err != nil { - return c.Status(fiber.StatusBadRequest).JSON(api.ErrorResponse{ - Status: "error", - Error: "invalid request body: " + err.Error(), - }) - } - - port := domain.Port{ - Port: req.Port, - Protocol: string(req.Protocol), - Name: req.Name, - Mandatory: utils.BoolValue(req.Mandatory), - CheckActivity: utils.BoolValue(req.CheckActivity), - Description: utils.StringValue(req.Description), - } - - augmentedPort, err := p.portService.AddPort(port) - if err != nil { - return c.Status(fiber.StatusBadRequest).JSON(api.ErrorResponse{ - Status: "error", - Error: err.Error(), - }) - } - - return c.Status(fiber.StatusCreated).JSON(augmentedPort) -} - -func (p PortHandler) DeletePort(c 
*fiber.Ctx, port int) error { - err := p.portService.RemovePort(port) - if err != nil { - return c.Status(fiber.StatusNotFound).JSON(api.ErrorResponse{ - Status: "error", - Error: err.Error(), - }) - } - - return c.SendStatus(fiber.StatusNoContent) -} diff --git a/internal/handler/port_handler_test.go b/internal/handler/port_handler_test.go deleted file mode 100644 index 6fd4989a..00000000 --- a/internal/handler/port_handler_test.go +++ /dev/null @@ -1,437 +0,0 @@ -package handler - -import ( - "encoding/json" - "fmt" - "io" - "net/http/httptest" - "strings" - "testing" - "time" - - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/api" - "github.com/highcard-dev/daemon/internal/core/domain" - mock_ports "github.com/highcard-dev/daemon/test/mock" - "go.uber.org/mock/gomock" -) - -// PortTestContext holds all mocked services for port handler testing -type PortTestContext struct { - App *fiber.App - Ctrl *gomock.Controller - PortService *mock_ports.MockPortServiceInterface - Handler *PortHandler -} - -// setupPortTestApp creates a Fiber app with mocked dependencies for testing -func setupPortTestApp(t *testing.T) *PortTestContext { - ctrl := gomock.NewController(t) - - portService := mock_ports.NewMockPortServiceInterface(ctrl) - handler := NewPortHandler(portService) - - app := fiber.New() - app.Get("/api/v1/ports", handler.GetPorts) - app.Post("/api/v1/ports", handler.AddPort) - app.Delete("/api/v1/ports/:port", func(c *fiber.Ctx) error { - port, err := c.ParamsInt("port") - if err != nil { - return c.Status(fiber.StatusBadRequest).JSON(api.ErrorResponse{ - Status: "error", - Error: "invalid port number", - }) - } - return handler.DeletePort(c, port) - }) - - return &PortTestContext{ - App: app, - Ctrl: ctrl, - PortService: portService, - Handler: handler, - } -} - -func TestPortHandler_GetPorts_Success(t *testing.T) { - tc := setupPortTestApp(t) - defer tc.Ctrl.Finish() - - expectedPorts := []*domain.AugmentedPort{ - { - Port: domain.Port{ - 
Port: 8080, - Protocol: "tcp", - Name: "http", - }, - Open: true, - InactiveSince: time.Now(), - InactiveSinceSec: 0, - }, - { - Port: domain.Port{ - Port: 443, - Protocol: "tcp", - Name: "https", - }, - Open: true, - InactiveSinceSec: 10, - }, - } - tc.PortService.EXPECT().GetPorts().Return(expectedPorts) - - req := httptest.NewRequest("GET", "/api/v1/ports", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result []*domain.AugmentedPort - if err := json.Unmarshal(body, &result); err != nil { - t.Fatalf("Failed to unmarshal response: %v", err) - } - - if len(result) != 2 { - t.Errorf("Expected 2 ports, got %d", len(result)) - } - if result[0].Port.Port != 8080 { - t.Errorf("Expected port 8080, got %d", result[0].Port.Port) - } - if result[1].Port.Port != 443 { - t.Errorf("Expected port 443, got %d", result[1].Port.Port) - } -} - -func TestPortHandler_GetPorts_Empty(t *testing.T) { - tc := setupPortTestApp(t) - defer tc.Ctrl.Finish() - - tc.PortService.EXPECT().GetPorts().Return([]*domain.AugmentedPort{}) - - req := httptest.NewRequest("GET", "/api/v1/ports", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result []*domain.AugmentedPort - json.Unmarshal(body, &result) - - if len(result) != 0 { - t.Errorf("Expected 0 ports, got %d", len(result)) - } -} - -func TestPortHandler_GetPorts_Nil(t *testing.T) { - tc := setupPortTestApp(t) - defer tc.Ctrl.Finish() - - tc.PortService.EXPECT().GetPorts().Return(nil) - - req := httptest.NewRequest("GET", "/api/v1/ports", nil) - resp, err := tc.App.Test(req) - if err != nil { - 
t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } -} - -func TestPortHandler_GetPorts_WithMandatoryPort(t *testing.T) { - tc := setupPortTestApp(t) - defer tc.Ctrl.Finish() - - expectedPorts := []*domain.AugmentedPort{ - { - Port: domain.Port{ - Port: 25565, - Protocol: "tcp", - Name: "minecraft", - Mandatory: true, - }, - Open: false, - InactiveSinceSec: 120, - }, - } - tc.PortService.EXPECT().GetPorts().Return(expectedPorts) - - req := httptest.NewRequest("GET", "/api/v1/ports", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result []*domain.AugmentedPort - json.Unmarshal(body, &result) - - if len(result) != 1 { - t.Errorf("Expected 1 port, got %d", len(result)) - } - if !result[0].Mandatory { - t.Error("Expected port to be mandatory") - } -} - -func TestPortHandler_AddPort_Success(t *testing.T) { - tc := setupPortTestApp(t) - defer tc.Ctrl.Finish() - - expectedPort := &domain.AugmentedPort{ - Port: domain.Port{ - Port: 8080, - Protocol: "tcp", - Name: "http", - }, - InactiveSince: time.Now(), - } - - tc.PortService.EXPECT().AddPort(domain.Port{ - Port: 8080, - Protocol: "tcp", - Name: "http", - }).Return(expectedPort, nil) - - body := `{"port": 8080, "protocol": "tcp", "name": "http"}` - req := httptest.NewRequest("POST", "/api/v1/ports", strings.NewReader(body)) - req.Header.Set("Content-Type", "application/json") - - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 201 { - t.Errorf("Expected status 201, got %d", resp.StatusCode) - } - - respBody, _ := io.ReadAll(resp.Body) - var result domain.AugmentedPort - if err := 
json.Unmarshal(respBody, &result); err != nil { - t.Fatalf("Failed to unmarshal response: %v", err) - } - - if result.Port.Port != 8080 { - t.Errorf("Expected port 8080, got %d", result.Port.Port) - } - if result.Port.Protocol != "tcp" { - t.Errorf("Expected protocol tcp, got %s", result.Port.Protocol) - } - if result.Port.Name != "http" { - t.Errorf("Expected name http, got %s", result.Port.Name) - } -} - -func TestPortHandler_AddPort_WithOptionalFields(t *testing.T) { - tc := setupPortTestApp(t) - defer tc.Ctrl.Finish() - - expectedPort := &domain.AugmentedPort{ - Port: domain.Port{ - Port: 9090, - Protocol: "udp", - Name: "game", - Mandatory: true, - CheckActivity: true, - Description: "Game server port", - }, - InactiveSince: time.Now(), - } - - tc.PortService.EXPECT().AddPort(domain.Port{ - Port: 9090, - Protocol: "udp", - Name: "game", - Mandatory: true, - CheckActivity: true, - Description: "Game server port", - }).Return(expectedPort, nil) - - body := `{"port": 9090, "protocol": "udp", "name": "game", "mandatory": true, "check_activity": true, "description": "Game server port"}` - req := httptest.NewRequest("POST", "/api/v1/ports", strings.NewReader(body)) - req.Header.Set("Content-Type", "application/json") - - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 201 { - t.Errorf("Expected status 201, got %d", resp.StatusCode) - } - - respBody, _ := io.ReadAll(resp.Body) - var result domain.AugmentedPort - json.Unmarshal(respBody, &result) - - if !result.Mandatory { - t.Error("Expected port to be mandatory") - } - if !result.CheckActivity { - t.Error("Expected check_activity to be true") - } - if result.Description != "Game server port" { - t.Errorf("Expected description 'Game server port', got '%s'", result.Description) - } -} - -func TestPortHandler_AddPort_InvalidBody(t *testing.T) { - tc := setupPortTestApp(t) - defer tc.Ctrl.Finish() - - req := 
httptest.NewRequest("POST", "/api/v1/ports", strings.NewReader("not json")) - req.Header.Set("Content-Type", "application/json") - - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 400 { - t.Errorf("Expected status 400, got %d", resp.StatusCode) - } - - respBody, _ := io.ReadAll(resp.Body) - var result api.ErrorResponse - json.Unmarshal(respBody, &result) - - if result.Status != "error" { - t.Errorf("Expected status 'error', got '%s'", result.Status) - } -} - -func TestPortHandler_AddPort_DuplicatePort(t *testing.T) { - tc := setupPortTestApp(t) - defer tc.Ctrl.Finish() - - tc.PortService.EXPECT().AddPort(gomock.Any()).Return(nil, fmt.Errorf("port 8080 is already being watched")) - - body := `{"port": 8080, "protocol": "tcp", "name": "http"}` - req := httptest.NewRequest("POST", "/api/v1/ports", strings.NewReader(body)) - req.Header.Set("Content-Type", "application/json") - - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 400 { - t.Errorf("Expected status 400, got %d", resp.StatusCode) - } - - respBody, _ := io.ReadAll(resp.Body) - var result api.ErrorResponse - json.Unmarshal(respBody, &result) - - if result.Error != "port 8080 is already being watched" { - t.Errorf("Expected duplicate port error, got '%s'", result.Error) - } -} - -func TestPortHandler_AddPort_ValidationError(t *testing.T) { - tc := setupPortTestApp(t) - defer tc.Ctrl.Finish() - - tc.PortService.EXPECT().AddPort(gomock.Any()).Return(nil, fmt.Errorf("port number must be between 1 and 65535, got 0")) - - body := `{"port": 0, "protocol": "tcp", "name": "invalid"}` - req := httptest.NewRequest("POST", "/api/v1/ports", strings.NewReader(body)) - req.Header.Set("Content-Type", "application/json") - - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) 
- } - defer resp.Body.Close() - - if resp.StatusCode != 400 { - t.Errorf("Expected status 400, got %d", resp.StatusCode) - } -} - -func TestPortHandler_DeletePort_Success(t *testing.T) { - tc := setupPortTestApp(t) - defer tc.Ctrl.Finish() - - tc.PortService.EXPECT().RemovePort(8080).Return(nil) - - req := httptest.NewRequest("DELETE", "/api/v1/ports/8080", nil) - - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 204 { - t.Errorf("Expected status 204, got %d", resp.StatusCode) - } -} - -func TestPortHandler_DeletePort_NotFound(t *testing.T) { - tc := setupPortTestApp(t) - defer tc.Ctrl.Finish() - - tc.PortService.EXPECT().RemovePort(9999).Return(fmt.Errorf("port 9999 not found")) - - req := httptest.NewRequest("DELETE", "/api/v1/ports/9999", nil) - - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 404 { - t.Errorf("Expected status 404, got %d", resp.StatusCode) - } - - respBody, _ := io.ReadAll(resp.Body) - var result api.ErrorResponse - json.Unmarshal(respBody, &result) - - if result.Error != "port 9999 not found" { - t.Errorf("Expected 'port 9999 not found', got '%s'", result.Error) - } -} - -func TestPortHandler_DeletePort_InvalidPortParam(t *testing.T) { - tc := setupPortTestApp(t) - defer tc.Ctrl.Finish() - - req := httptest.NewRequest("DELETE", "/api/v1/ports/notanumber", nil) - - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 400 { - t.Errorf("Expected status 400, got %d", resp.StatusCode) - } -} diff --git a/internal/handler/process_handler.go b/internal/handler/process_handler.go deleted file mode 100644 index cd322161..00000000 --- a/internal/handler/process_handler.go +++ /dev/null @@ -1,35 +0,0 @@ -package handler - -import ( - 
"github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/api" - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/core/ports" -) - -type ProcessHandler struct { - ProcessManager ports.ProcessManagerInterface -} - -func domainProcessToAPI(dp *domain.Process) api.Process { - return api.Process{ - Name: dp.Name, - Type: dp.Type, - } -} - -func NewProcessHandler(processManager ports.ProcessManagerInterface) *ProcessHandler { - return &ProcessHandler{ProcessManager: processManager} -} - -func (ph ProcessHandler) GetProcesses(c *fiber.Ctx) error { - processes := ph.ProcessManager.GetRunningProcesses() - - // Convert domain processes to API processes - apiProcesses := make(map[string]api.Process, len(processes)) - for k, v := range processes { - apiProcesses[k] = domainProcessToAPI(v) - } - - return c.JSON(api.ProcessesResponse{Processes: apiProcesses}) -} diff --git a/internal/handler/process_handler_test.go b/internal/handler/process_handler_test.go deleted file mode 100644 index bbec7682..00000000 --- a/internal/handler/process_handler_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package handler - -import ( - "encoding/json" - "io" - "net/http/httptest" - "testing" - - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/api" - "github.com/highcard-dev/daemon/internal/core/domain" - mock_ports "github.com/highcard-dev/daemon/test/mock" - "go.uber.org/mock/gomock" -) - -// ProcessTestContext holds all mocked services for process handler testing -type ProcessTestContext struct { - App *fiber.App - Ctrl *gomock.Controller - ProcessManager *mock_ports.MockProcessManagerInterface - Handler *ProcessHandler -} - -// setupProcessTestApp creates a Fiber app with mocked dependencies for testing -func setupProcessTestApp(t *testing.T) *ProcessTestContext { - ctrl := gomock.NewController(t) - - processManager := mock_ports.NewMockProcessManagerInterface(ctrl) - handler := 
NewProcessHandler(processManager) - - app := fiber.New() - app.Get("/api/v1/processes", handler.GetProcesses) - - return &ProcessTestContext{ - App: app, - Ctrl: ctrl, - ProcessManager: processManager, - Handler: handler, - } -} - -func TestProcessHandler_Processes_Success(t *testing.T) { - tc := setupProcessTestApp(t) - defer tc.Ctrl.Finish() - - expectedProcesses := map[string]*domain.Process{ - "start": { - Name: "start", - Type: "tty", - }, - "install": { - Name: "install", - Type: "exec", - }, - } - tc.ProcessManager.EXPECT().GetRunningProcesses().Return(expectedProcesses) - - req := httptest.NewRequest("GET", "/api/v1/processes", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result api.ProcessesResponse - if err := json.Unmarshal(body, &result); err != nil { - t.Fatalf("Failed to unmarshal response: %v", err) - } - - if len(result.Processes) != 2 { - t.Errorf("Expected 2 processes, got %d", len(result.Processes)) - } - if _, ok := result.Processes["start"]; !ok { - t.Error("Expected 'start' process to be present") - } - if _, ok := result.Processes["install"]; !ok { - t.Error("Expected 'install' process to be present") - } -} - -func TestProcessHandler_Processes_Empty(t *testing.T) { - tc := setupProcessTestApp(t) - defer tc.Ctrl.Finish() - - tc.ProcessManager.EXPECT().GetRunningProcesses().Return(map[string]*domain.Process{}) - - req := httptest.NewRequest("GET", "/api/v1/processes", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result api.ProcessesResponse - json.Unmarshal(body, &result) - - if 
len(result.Processes) != 0 { - t.Errorf("Expected 0 processes, got %d", len(result.Processes)) - } -} - -func TestProcessHandler_Processes_Nil(t *testing.T) { - tc := setupProcessTestApp(t) - defer tc.Ctrl.Finish() - - tc.ProcessManager.EXPECT().GetRunningProcesses().Return(nil) - - req := httptest.NewRequest("GET", "/api/v1/processes", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } -} - -func TestProcessHandler_Processes_SingleProcess(t *testing.T) { - tc := setupProcessTestApp(t) - defer tc.Ctrl.Finish() - - expectedProcesses := map[string]*domain.Process{ - "main": { - Name: "main", - Type: "tty", - }, - } - tc.ProcessManager.EXPECT().GetRunningProcesses().Return(expectedProcesses) - - req := httptest.NewRequest("GET", "/api/v1/processes", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result api.ProcessesResponse - json.Unmarshal(body, &result) - - if len(result.Processes) != 1 { - t.Errorf("Expected 1 process, got %d", len(result.Processes)) - } - if result.Processes["main"].Name != "main" { - t.Errorf("Expected name 'main', got '%s'", result.Processes["main"].Name) - } -} diff --git a/internal/handler/queue_hander.go b/internal/handler/queue_hander.go deleted file mode 100644 index 9b197eff..00000000 --- a/internal/handler/queue_hander.go +++ /dev/null @@ -1,18 +0,0 @@ -package handler - -import ( - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/core/ports" -) - -type QueueHandler struct { - QueueManager ports.QueueManagerInterface -} - -func NewQueueHandler(queueManager ports.QueueManagerInterface) *ScrollHandler { - return 
&ScrollHandler{QueueManager: queueManager} -} - -func (sl ScrollHandler) GetQueue(c *fiber.Ctx) error { - return c.JSON(sl.QueueManager.GetQueue()) -} diff --git a/internal/handler/queue_handler_test.go b/internal/handler/queue_handler_test.go deleted file mode 100644 index da0c15e2..00000000 --- a/internal/handler/queue_handler_test.go +++ /dev/null @@ -1,152 +0,0 @@ -package handler - -import ( - "encoding/json" - "io" - "net/http/httptest" - "testing" - - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/core/domain" - mock_ports "github.com/highcard-dev/daemon/test/mock" - "go.uber.org/mock/gomock" -) - -// QueueTestContext holds all mocked services for queue handler testing -type QueueTestContext struct { - App *fiber.App - Ctrl *gomock.Controller - QueueManager *mock_ports.MockQueueManagerInterface - Handler *ScrollHandler // Note: Queue uses ScrollHandler struct -} - -// setupQueueTestApp creates a Fiber app with mocked dependencies for testing -func setupQueueTestApp(t *testing.T) *QueueTestContext { - ctrl := gomock.NewController(t) - - queueManager := mock_ports.NewMockQueueManagerInterface(ctrl) - // NewQueueHandler returns *ScrollHandler - handler := NewQueueHandler(queueManager) - - app := fiber.New() - app.Get("/api/v1/queue", handler.GetQueue) - - return &QueueTestContext{ - App: app, - Ctrl: ctrl, - QueueManager: queueManager, - Handler: handler, - } -} - -func TestQueueHandler_Queue_Success(t *testing.T) { - tc := setupQueueTestApp(t) - defer tc.Ctrl.Finish() - - expectedQueue := map[string]domain.ScrollLockStatus{ - "install": "done", - "start": "running", - "backup": "waiting", - } - tc.QueueManager.EXPECT().GetQueue().Return(expectedQueue) - - req := httptest.NewRequest("GET", "/api/v1/queue", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - 
body, _ := io.ReadAll(resp.Body) - var result map[string]domain.ScrollLockStatus - if err := json.Unmarshal(body, &result); err != nil { - t.Fatalf("Failed to unmarshal response: %v", err) - } - - if len(result) != 3 { - t.Errorf("Expected 3 queue items, got %d", len(result)) - } - if result["install"] != "done" { - t.Errorf("Expected install status 'done', got '%s'", result["install"]) - } - if result["start"] != "running" { - t.Errorf("Expected start status 'running', got '%s'", result["start"]) - } -} - -func TestQueueHandler_Queue_Empty(t *testing.T) { - tc := setupQueueTestApp(t) - defer tc.Ctrl.Finish() - - tc.QueueManager.EXPECT().GetQueue().Return(map[string]domain.ScrollLockStatus{}) - - req := httptest.NewRequest("GET", "/api/v1/queue", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result map[string]domain.ScrollLockStatus - json.Unmarshal(body, &result) - - if len(result) != 0 { - t.Errorf("Expected 0 queue items, got %d", len(result)) - } -} - -func TestQueueHandler_Queue_Nil(t *testing.T) { - tc := setupQueueTestApp(t) - defer tc.Ctrl.Finish() - - tc.QueueManager.EXPECT().GetQueue().Return(nil) - - req := httptest.NewRequest("GET", "/api/v1/queue", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } -} - -func TestQueueHandler_Queue_SingleItem(t *testing.T) { - tc := setupQueueTestApp(t) - defer tc.Ctrl.Finish() - - expectedQueue := map[string]domain.ScrollLockStatus{ - "init": "done", - } - tc.QueueManager.EXPECT().GetQueue().Return(expectedQueue) - - req := httptest.NewRequest("GET", "/api/v1/queue", nil) - resp, err := tc.App.Test(req) - if err != nil { 
- t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result map[string]domain.ScrollLockStatus - json.Unmarshal(body, &result) - - if len(result) != 1 { - t.Errorf("Expected 1 queue item, got %d", len(result)) - } -} diff --git a/internal/handler/scroll_handler.go b/internal/handler/scroll_handler.go deleted file mode 100644 index 212efad8..00000000 --- a/internal/handler/scroll_handler.go +++ /dev/null @@ -1,148 +0,0 @@ -package handler - -import ( - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/api" - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/core/ports" - "github.com/highcard-dev/daemon/internal/utils/logger" - "go.uber.org/zap" -) - -type ScrollHandler struct { - ScrollService ports.ScrollServiceInterface - PluginManager ports.PluginManagerInterface - ProcessLauncher ports.ProcedureLauchnerInterface - QueueManager ports.QueueManagerInterface - ProcessManager ports.ProcessManagerInterface -} - -func NewScrollHandler( - scrollService ports.ScrollServiceInterface, - pluginManager ports.PluginManagerInterface, - processLauncher ports.ProcedureLauchnerInterface, - queueManager ports.QueueManagerInterface, - processManager ports.ProcessManagerInterface, -) *ScrollHandler { - return &ScrollHandler{ScrollService: scrollService, PluginManager: pluginManager, ProcessLauncher: processLauncher, QueueManager: queueManager, ProcessManager: processManager} -} - -func (sl ScrollHandler) GetScroll(c *fiber.Ctx) error { - return c.JSON(sl.ScrollService.GetFile()) -} - -func (sl ScrollHandler) RunCommand(c *fiber.Ctx) error { - var requestBody api.StartCommandRequest - - err := c.BodyParser(&requestBody) - if err != nil { - return c.SendStatus(400) - } - - // Handle optional Sync field - sync := false - if requestBody.Sync != nil { - sync 
= *requestBody.Sync - } - - if sync { - err = sl.QueueManager.AddTempItemWithWait(requestBody.Command) - if err != nil { - logger.Log().Error("Error running command (sync)", zap.Error(err)) - return c.SendStatus(500) - } - return c.SendStatus(200) - } else { - go func() { - err = sl.QueueManager.AddTempItem(requestBody.Command) - if err != nil { - logger.Log().Error("Error running command (async)", zap.Error(err)) - } - }() - c.SendStatus(201) - return nil - } -} - -func (sl ScrollHandler) RunProcedure(c *fiber.Ctx) error { - var requestBody api.StartProcedureRequest - - err := c.BodyParser(&requestBody) - if err != nil { - return c.SendStatus(400) - } - - if !sl.PluginManager.CanRunStandaloneProcedure(requestBody.Mode) && requestBody.Mode != "stdin" { - c.SendString("Not allowed to run this mode as standalone procedure.") - return c.SendStatus(400) - } - if requestBody.Data == "" { - c.SendString("Data cannot be empty") - return c.SendStatus(400) - } - - var procedure domain.Procedure - if requestBody.Mode == "stdin" { - procedure = domain.Procedure{ - Data: []interface{}{ - requestBody.Process, - requestBody.Data, - }, - Mode: requestBody.Mode, - } - } else { - procedure = domain.Procedure{ - Data: requestBody.Data, - Mode: requestBody.Mode, - } - } - - command := requestBody.Process - - // Handle optional Dependencies field - deps := []string{} - if requestBody.Dependencies != nil { - deps = *requestBody.Dependencies - } - - process := sl.ProcessManager.GetRunningProcess(command) - if process == nil { - c.SendString("Running process not found") - return c.SendStatus(400) - } - - // Handle optional Sync field - sync := false - if requestBody.Sync != nil { - sync = *requestBody.Sync - } - - if !sync { - go sl.ProcessLauncher.RunProcedure(&procedure, command, deps) - return c.SendStatus(201) - } else { - res, _, err := sl.ProcessLauncher.RunProcedure(&procedure, command, deps) - if err != nil { - c.SendString(err.Error()) - return c.SendStatus(400) - } - return 
c.JSON(res) - } -} - -func (sh ScrollHandler) GetProcedures(c *fiber.Ctx) error { - process := sh.ProcessLauncher.GetProcedureStatuses() - return c.JSON(process) -} - -func (sh ScrollHandler) AddCommand(c *fiber.Ctx, command string) error { - - var commands *domain.CommandInstructionSet - err := c.BodyParser(&commands) - if err != nil { - return c.SendStatus(400) - } - sh.ScrollService.AddTemporaryCommand(command, commands) - - return c.SendStatus(201) -} diff --git a/internal/handler/scroll_handler_test.go b/internal/handler/scroll_handler_test.go deleted file mode 100644 index e1694e80..00000000 --- a/internal/handler/scroll_handler_test.go +++ /dev/null @@ -1,677 +0,0 @@ -package handler - -import ( - "bytes" - "encoding/json" - "io" - "net/http/httptest" - "testing" - "time" - - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/api" - "github.com/highcard-dev/daemon/internal/core/domain" - mock_ports "github.com/highcard-dev/daemon/test/mock" - "go.uber.org/mock/gomock" -) - -// Helper functions for creating pointer values in test structs -func boolPtr(b bool) *bool { - return &b -} - -func stringSlicePtr(s []string) *[]string { - return &s -} - -// TestContext holds all mocked services for testing -type TestContext struct { - App *fiber.App - Ctrl *gomock.Controller - ScrollService *mock_ports.MockScrollServiceInterface - PluginManager *mock_ports.MockPluginManagerInterface - ProcedureLauncher *mock_ports.MockProcedureLauchnerInterface - QueueManager *mock_ports.MockQueueManagerInterface - ProcessManager *mock_ports.MockProcessManagerInterface - Handler *ScrollHandler -} - -// setupTestApp creates a Fiber app with mocked dependencies for testing -func setupTestApp(t *testing.T) *TestContext { - ctrl := gomock.NewController(t) - - // Create mocked services - scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) - pluginManager := mock_ports.NewMockPluginManagerInterface(ctrl) - procedureLauncher := 
mock_ports.NewMockProcedureLauchnerInterface(ctrl) - queueManager := mock_ports.NewMockQueueManagerInterface(ctrl) - processManager := mock_ports.NewMockProcessManagerInterface(ctrl) - - // Create handler with mocks - handler := NewScrollHandler(scrollService, pluginManager, procedureLauncher, queueManager, processManager) - - // Create minimal Fiber app for testing - app := fiber.New() - app.Get("/api/v1/scroll", handler.GetScroll) - app.Post("/api/v1/command", handler.RunCommand) - app.Post("/api/v1/procedure", handler.RunProcedure) - app.Get("/api/v1/procedures", handler.GetProcedures) - - return &TestContext{ - App: app, - Ctrl: ctrl, - ScrollService: scrollService, - PluginManager: pluginManager, - ProcedureLauncher: procedureLauncher, - QueueManager: queueManager, - ProcessManager: processManager, - Handler: handler, - } -} - -// ============================================================================ -// GET /api/v1/scroll Tests -// ============================================================================ - -func TestScrollHandler_GetScroll_Success(t *testing.T) { - tc := setupTestApp(t) - defer tc.Ctrl.Finish() - - // Setup mock expectations - expectedFile := &domain.File{ - Name: "test-scroll", - Desc: "Test scroll description", - AppVersion: "1.0.0", - } - tc.ScrollService.EXPECT().GetFile().Return(expectedFile) - - // Create request - req := httptest.NewRequest("GET", "/api/v1/scroll", nil) - req.Header.Set("Content-Type", "application/json") - - // Execute request - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Verify response status - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - // Verify response body - body, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatalf("Failed to read response body: %v", err) - } - - var result domain.File - if err := json.Unmarshal(body, &result); err != nil { - 
t.Fatalf("Failed to unmarshal response: %v", err) - } - - if result.Name != expectedFile.Name { - t.Errorf("Expected name %s, got %s", expectedFile.Name, result.Name) - } - if result.Desc != expectedFile.Desc { - t.Errorf("Expected desc %s, got %s", expectedFile.Desc, result.Desc) - } -} - -func TestScrollHandler_GetScroll_NilFile(t *testing.T) { - tc := setupTestApp(t) - defer tc.Ctrl.Finish() - - // Setup mock to return nil - tc.ScrollService.EXPECT().GetFile().Return(nil) - - // Create request - req := httptest.NewRequest("GET", "/api/v1/scroll", nil) - - // Execute request - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Should still return 200 with null body - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } -} - -// ============================================================================ -// POST /api/v1/command Tests -// ============================================================================ - -func TestScrollHandler_RunCommand_SyncSuccess(t *testing.T) { - tc := setupTestApp(t) - defer tc.Ctrl.Finish() - - // Setup mock expectations for sync command - tc.QueueManager.EXPECT().AddTempItemWithWait("test-command").Return(nil) - - // Create request body - requestBody := api.StartCommandRequest{ - Command: "test-command", - Sync: boolPtr(true), - } - bodyBytes, _ := json.Marshal(requestBody) - - // Create request - req := httptest.NewRequest("POST", "/api/v1/command", bytes.NewReader(bodyBytes)) - req.Header.Set("Content-Type", "application/json") - - // Execute request - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Verify response status (200 for sync) - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } -} - -func TestScrollHandler_RunCommand_AsyncSuccess(t *testing.T) { - tc := setupTestApp(t) - 
defer tc.Ctrl.Finish() - - // Setup mock expectations for async command - tc.QueueManager.EXPECT().AddTempItem("test-command").Return(nil) - - // Create request body - requestBody := api.StartCommandRequest{ - Command: "test-command", - Sync: boolPtr(false), - } - bodyBytes, _ := json.Marshal(requestBody) - - // Create request - req := httptest.NewRequest("POST", "/api/v1/command", bytes.NewReader(bodyBytes)) - req.Header.Set("Content-Type", "application/json") - - // Execute request - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Verify response status (201 for async) - if resp.StatusCode != 201 { - t.Errorf("Expected status 201, got %d", resp.StatusCode) - } - - // Give async goroutine time to complete - time.Sleep(100 * time.Millisecond) -} - -func TestScrollHandler_RunCommand_InvalidBody(t *testing.T) { - tc := setupTestApp(t) - defer tc.Ctrl.Finish() - - // Create request with invalid JSON - req := httptest.NewRequest("POST", "/api/v1/command", bytes.NewReader([]byte("invalid json"))) - req.Header.Set("Content-Type", "application/json") - - // Execute request - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Verify response status (400 for invalid body) - if resp.StatusCode != 400 { - t.Errorf("Expected status 400, got %d", resp.StatusCode) - } -} - -func TestScrollHandler_RunCommand_SyncError(t *testing.T) { - tc := setupTestApp(t) - defer tc.Ctrl.Finish() - - // Setup mock to return error - tc.QueueManager.EXPECT().AddTempItemWithWait("test-command").Return(fiber.NewError(500, "internal error")) - - // Create request body - requestBody := api.StartCommandRequest{ - Command: "test-command", - Sync: boolPtr(true), - } - bodyBytes, _ := json.Marshal(requestBody) - - // Create request - req := httptest.NewRequest("POST", "/api/v1/command", bytes.NewReader(bodyBytes)) - 
req.Header.Set("Content-Type", "application/json") - - // Execute request - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Verify response status (500 for error) - if resp.StatusCode != 500 { - t.Errorf("Expected status 500, got %d", resp.StatusCode) - } -} - -func TestScrollHandler_RunCommand_EmptyBody(t *testing.T) { - tc := setupTestApp(t) - defer tc.Ctrl.Finish() - - // Create request with empty body - req := httptest.NewRequest("POST", "/api/v1/command", nil) - req.Header.Set("Content-Type", "application/json") - - // Execute request - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Verify response status (400 for empty body) - if resp.StatusCode != 400 { - t.Errorf("Expected status 400, got %d", resp.StatusCode) - } -} - -// ============================================================================ -// POST /api/v1/procedure Tests -// ============================================================================ - -func TestScrollHandler_RunProcedure_SyncSuccess(t *testing.T) { - tc := setupTestApp(t) - defer tc.Ctrl.Finish() - - // Setup mock expectations - tc.PluginManager.EXPECT().CanRunStandaloneProcedure("rcon").Return(true) - tc.ProcessManager.EXPECT().GetRunningProcess("test-process").Return(&domain.Process{}) - tc.ProcedureLauncher.EXPECT().RunProcedure(gomock.Any(), "test-process", []string{"dep1"}).Return("result", nil, nil) - - // Create request body - requestBody := api.StartProcedureRequest{ - Mode: "rcon", - Data: "test-data", - Process: "test-process", - Dependencies: stringSlicePtr([]string{"dep1"}), - Sync: boolPtr(true), - } - bodyBytes, _ := json.Marshal(requestBody) - - // Create request - req := httptest.NewRequest("POST", "/api/v1/procedure", bytes.NewReader(bodyBytes)) - req.Header.Set("Content-Type", "application/json") - - // Execute request - resp, err := 
tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Verify response status (200 for sync success) - if resp.StatusCode != 200 { - body, _ := io.ReadAll(resp.Body) - t.Errorf("Expected status 200, got %d, body: %s", resp.StatusCode, string(body)) - } -} - -func TestScrollHandler_RunProcedure_AsyncSuccess(t *testing.T) { - tc := setupTestApp(t) - defer tc.Ctrl.Finish() - - // Setup mock expectations - tc.PluginManager.EXPECT().CanRunStandaloneProcedure("rcon").Return(true) - tc.ProcessManager.EXPECT().GetRunningProcess("test-process").Return(&domain.Process{}) - tc.ProcedureLauncher.EXPECT().RunProcedure(gomock.Any(), "test-process", []string{}).Return("", nil, nil) - - // Create request body - requestBody := api.StartProcedureRequest{ - Mode: "rcon", - Data: "test-data", - Process: "test-process", - Dependencies: stringSlicePtr([]string{}), - Sync: boolPtr(false), - } - bodyBytes, _ := json.Marshal(requestBody) - - // Create request - req := httptest.NewRequest("POST", "/api/v1/procedure", bytes.NewReader(bodyBytes)) - req.Header.Set("Content-Type", "application/json") - - // Execute request - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Verify response status (201 for async) - if resp.StatusCode != 201 { - t.Errorf("Expected status 201, got %d", resp.StatusCode) - } - - // Give async goroutine time to complete - time.Sleep(100 * time.Millisecond) -} - -func TestScrollHandler_RunProcedure_StdinMode(t *testing.T) { - tc := setupTestApp(t) - defer tc.Ctrl.Finish() - - // Setup mock expectations - stdin mode is always allowed - // Note: CanRunStandaloneProcedure is still called due to evaluation order, but the condition short-circuits - tc.PluginManager.EXPECT().CanRunStandaloneProcedure("stdin").Return(false) - tc.ProcessManager.EXPECT().GetRunningProcess("test-process").Return(&domain.Process{}) - 
tc.ProcedureLauncher.EXPECT().RunProcedure(gomock.Any(), "test-process", []string{}).Return("result", nil, nil) - - // Create request body with stdin mode - requestBody := api.StartProcedureRequest{ - Mode: "stdin", - Data: "test-data", - Process: "test-process", - Dependencies: stringSlicePtr([]string{}), - Sync: boolPtr(true), - } - bodyBytes, _ := json.Marshal(requestBody) - - // Create request - req := httptest.NewRequest("POST", "/api/v1/procedure", bytes.NewReader(bodyBytes)) - req.Header.Set("Content-Type", "application/json") - - // Execute request - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Verify response status - if resp.StatusCode != 200 { - body, _ := io.ReadAll(resp.Body) - t.Errorf("Expected status 200, got %d, body: %s", resp.StatusCode, string(body)) - } -} - -func TestScrollHandler_RunProcedure_InvalidMode(t *testing.T) { - tc := setupTestApp(t) - defer tc.Ctrl.Finish() - - // Setup mock - mode not allowed - tc.PluginManager.EXPECT().CanRunStandaloneProcedure("invalid-mode").Return(false) - - // Create request body - requestBody := api.StartProcedureRequest{ - Mode: "invalid-mode", - Data: "test-data", - Process: "test-process", - Dependencies: stringSlicePtr([]string{}), - Sync: boolPtr(true), - } - bodyBytes, _ := json.Marshal(requestBody) - - // Create request - req := httptest.NewRequest("POST", "/api/v1/procedure", bytes.NewReader(bodyBytes)) - req.Header.Set("Content-Type", "application/json") - - // Execute request - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Verify response status (400 for invalid mode) - if resp.StatusCode != 400 { - t.Errorf("Expected status 400, got %d", resp.StatusCode) - } -} - -func TestScrollHandler_RunProcedure_EmptyData(t *testing.T) { - tc := setupTestApp(t) - defer tc.Ctrl.Finish() - - // Setup mock - 
tc.PluginManager.EXPECT().CanRunStandaloneProcedure("rcon").Return(true) - - // Create request body with empty data - requestBody := api.StartProcedureRequest{ - Mode: "rcon", - Data: "", - Process: "test-process", - Dependencies: stringSlicePtr([]string{}), - Sync: boolPtr(true), - } - bodyBytes, _ := json.Marshal(requestBody) - - // Create request - req := httptest.NewRequest("POST", "/api/v1/procedure", bytes.NewReader(bodyBytes)) - req.Header.Set("Content-Type", "application/json") - - // Execute request - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Verify response status (400 for empty data) - if resp.StatusCode != 400 { - t.Errorf("Expected status 400, got %d", resp.StatusCode) - } -} - -func TestScrollHandler_RunProcedure_ProcessNotFound(t *testing.T) { - tc := setupTestApp(t) - defer tc.Ctrl.Finish() - - // Setup mock - process not found - tc.PluginManager.EXPECT().CanRunStandaloneProcedure("rcon").Return(true) - tc.ProcessManager.EXPECT().GetRunningProcess("non-existent").Return(nil) - - // Create request body - requestBody := api.StartProcedureRequest{ - Mode: "rcon", - Data: "test-data", - Process: "non-existent", - Dependencies: stringSlicePtr([]string{}), - Sync: boolPtr(true), - } - bodyBytes, _ := json.Marshal(requestBody) - - // Create request - req := httptest.NewRequest("POST", "/api/v1/procedure", bytes.NewReader(bodyBytes)) - req.Header.Set("Content-Type", "application/json") - - // Execute request - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Verify response status (400 for process not found) - if resp.StatusCode != 400 { - t.Errorf("Expected status 400, got %d", resp.StatusCode) - } -} - -func TestScrollHandler_RunProcedure_InvalidBody(t *testing.T) { - tc := setupTestApp(t) - defer tc.Ctrl.Finish() - - // Create request with invalid JSON - req := 
httptest.NewRequest("POST", "/api/v1/procedure", bytes.NewReader([]byte("invalid json"))) - req.Header.Set("Content-Type", "application/json") - - // Execute request - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Verify response status (400 for invalid body) - if resp.StatusCode != 400 { - t.Errorf("Expected status 400, got %d", resp.StatusCode) - } -} - -func TestScrollHandler_RunProcedure_SyncError(t *testing.T) { - tc := setupTestApp(t) - defer tc.Ctrl.Finish() - - // Setup mock expectations - procedure returns error - tc.PluginManager.EXPECT().CanRunStandaloneProcedure("rcon").Return(true) - tc.ProcessManager.EXPECT().GetRunningProcess("test-process").Return(&domain.Process{}) - tc.ProcedureLauncher.EXPECT().RunProcedure(gomock.Any(), "test-process", []string{}).Return("", nil, fiber.NewError(500, "procedure failed")) - - // Create request body - requestBody := api.StartProcedureRequest{ - Mode: "rcon", - Data: "test-data", - Process: "test-process", - Dependencies: stringSlicePtr([]string{}), - Sync: boolPtr(true), - } - bodyBytes, _ := json.Marshal(requestBody) - - // Create request - req := httptest.NewRequest("POST", "/api/v1/procedure", bytes.NewReader(bodyBytes)) - req.Header.Set("Content-Type", "application/json") - - // Execute request - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Verify response status (400 for procedure error) - if resp.StatusCode != 400 { - t.Errorf("Expected status 400, got %d", resp.StatusCode) - } -} - -// ============================================================================ -// GET /api/v1/procedures Tests -// ============================================================================ - -func TestScrollHandler_Procedures_Success(t *testing.T) { - tc := setupTestApp(t) - defer tc.Ctrl.Finish() - - // Setup mock expectations - expectedStatuses := 
map[string]domain.ScrollLockStatus{ - "install": "done", - "start": "running", - } - tc.ProcedureLauncher.EXPECT().GetProcedureStatuses().Return(expectedStatuses) - - // Create request - req := httptest.NewRequest("GET", "/api/v1/procedures", nil) - - // Execute request - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Verify response status - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - // Verify response body - body, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatalf("Failed to read response body: %v", err) - } - - var result map[string]domain.ScrollLockStatus - if err := json.Unmarshal(body, &result); err != nil { - t.Fatalf("Failed to unmarshal response: %v", err) - } - - if len(result) != len(expectedStatuses) { - t.Errorf("Expected %d statuses, got %d", len(expectedStatuses), len(result)) - } - - for key, expectedValue := range expectedStatuses { - if result[key] != expectedValue { - t.Errorf("Expected status %s for %s, got %s", expectedValue, key, result[key]) - } - } -} - -func TestScrollHandler_Procedures_Empty(t *testing.T) { - tc := setupTestApp(t) - defer tc.Ctrl.Finish() - - // Setup mock to return empty map - tc.ProcedureLauncher.EXPECT().GetProcedureStatuses().Return(map[string]domain.ScrollLockStatus{}) - - // Create request - req := httptest.NewRequest("GET", "/api/v1/procedures", nil) - - // Execute request - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Verify response status - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - // Verify empty response - body, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatalf("Failed to read response body: %v", err) - } - - var result map[string]domain.ScrollLockStatus - if err := json.Unmarshal(body, &result); err != nil { - 
t.Fatalf("Failed to unmarshal response: %v", err) - } - - if len(result) != 0 { - t.Errorf("Expected 0 statuses, got %d", len(result)) - } -} - -func TestScrollHandler_Procedures_NilMap(t *testing.T) { - tc := setupTestApp(t) - defer tc.Ctrl.Finish() - - // Setup mock to return nil - tc.ProcedureLauncher.EXPECT().GetProcedureStatuses().Return(nil) - - // Create request - req := httptest.NewRequest("GET", "/api/v1/procedures", nil) - - // Execute request - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - // Should still return 200 - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } -} diff --git a/internal/handler/scroll_log_handler.go b/internal/handler/scroll_log_handler.go deleted file mode 100644 index 81bef925..00000000 --- a/internal/handler/scroll_log_handler.go +++ /dev/null @@ -1,82 +0,0 @@ -package handler - -import ( - "net/http" - "sync" - - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/api" - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/core/ports" -) - -type ScrollLogHandler struct { - scrollService ports.ScrollServiceInterface - logManager ports.LogManagerInterface - processManager ports.ProcessManagerInterface -} - -func NewScrollLogHandler(scrollService ports.ScrollServiceInterface, logManager ports.LogManagerInterface, processManager ports.ProcessManagerInterface) *ScrollLogHandler { - return &ScrollLogHandler{scrollService: scrollService, logManager: logManager, processManager: processManager} -} - -func (sl ScrollLogHandler) ListAllLogs(c *fiber.Ctx) error { - - streams := sl.logManager.GetStreams() - - responseData := make([]api.ScrollLogStream, 0, len(streams)) - mutex := sync.Mutex{} - wg := sync.WaitGroup{} - - for streamName, log := range streams { - req := make(chan []byte) - wg.Add(1) - log.Req <- req - go func(streamName string, res <-chan 
[]byte, log *domain.Log) { - defer wg.Done() - - logResponse := api.ScrollLogStream{ - Key: streamName, - Log: make([]string, 0, log.Capacity), - } - for { - cmd, ok := <-res - if !ok { - break - } - logResponse.Log = append(logResponse.Log, string(cmd)) - } - mutex.Lock() - defer mutex.Unlock() - responseData = append(responseData, logResponse) - }(streamName, req, log) - } - wg.Wait() - return c.JSON(responseData) -} - -func (sl ScrollLogHandler) ListStreamLogs(c *fiber.Ctx, stream string) error { - - steam, ok := sl.logManager.GetStreams()[c.Params("stream")] - if !ok { - c.SendStatus(http.StatusNotFound) - return nil - } - - responseData := api.ScrollLogStream{ - Key: c.Params("stream"), - Log: make([]string, 0, steam.Capacity), - } - req := make(chan []byte) - steam.Req <- req - - for { - res, ok := <-req - if !ok { - break - } - responseData.Log = append(responseData.Log, string(res)) - } - - return c.JSON(responseData) -} diff --git a/internal/handler/scroll_metric_handler.go b/internal/handler/scroll_metric_handler.go deleted file mode 100644 index 3c063c84..00000000 --- a/internal/handler/scroll_metric_handler.go +++ /dev/null @@ -1,28 +0,0 @@ -package handler - -import ( - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/core/ports" -) - -type ScrollMetricHandler struct { - ScrollService ports.ScrollServiceInterface - ProcessMonitor ports.ProcessMonitorInterface -} - -func NewScrollMetricHandler(scrollService ports.ScrollServiceInterface, processMonitor ports.ProcessMonitorInterface) *ScrollMetricHandler { - return &ScrollMetricHandler{ScrollService: scrollService, ProcessMonitor: processMonitor} -} - -// Keep original type aliases (use pointers to match service return types) -type PsTress = map[string]*domain.ProcessTreeRoot -type Metrics = map[string]*domain.ProcessMonitorMetrics - -func (sl ScrollMetricHandler) GetMetrics(c *fiber.Ctx) error { - return 
c.JSON(sl.ProcessMonitor.GetAllProcessesMetrics()) -} - -func (sl ScrollMetricHandler) GetPsTree(c *fiber.Ctx) error { - return c.JSON(sl.ProcessMonitor.GetPsTrees()) -} diff --git a/internal/handler/scroll_metric_handler_test.go b/internal/handler/scroll_metric_handler_test.go deleted file mode 100644 index a4089d8e..00000000 --- a/internal/handler/scroll_metric_handler_test.go +++ /dev/null @@ -1,231 +0,0 @@ -package handler - -import ( - "encoding/json" - "io" - "net/http/httptest" - "testing" - - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/core/domain" - mock_ports "github.com/highcard-dev/daemon/test/mock" - "go.uber.org/mock/gomock" -) - -// ScrollMetricTestContext holds all mocked services for scroll metric handler testing -type ScrollMetricTestContext struct { - App *fiber.App - Ctrl *gomock.Controller - ScrollService *mock_ports.MockScrollServiceInterface - ProcessMonitor *mock_ports.MockProcessMonitorInterface - Handler *ScrollMetricHandler -} - -// setupScrollMetricTestApp creates a Fiber app with mocked dependencies for testing -func setupScrollMetricTestApp(t *testing.T) *ScrollMetricTestContext { - ctrl := gomock.NewController(t) - - scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) - processMonitor := mock_ports.NewMockProcessMonitorInterface(ctrl) - handler := NewScrollMetricHandler(scrollService, processMonitor) - - app := fiber.New() - app.Get("/api/v1/metrics", handler.GetMetrics) - app.Get("/api/v1/pstree", handler.GetPsTree) - - return &ScrollMetricTestContext{ - App: app, - Ctrl: ctrl, - ScrollService: scrollService, - ProcessMonitor: processMonitor, - Handler: handler, - } -} - -func TestScrollMetricHandler_Metrics_Success(t *testing.T) { - tc := setupScrollMetricTestApp(t) - defer tc.Ctrl.Finish() - - expectedMetrics := map[string]*domain.ProcessMonitorMetrics{ - "start": { - Cpu: 25.5, - Memory: 1024000, - Pid: 1234, - }, - "worker": { - Cpu: 10.0, - Memory: 512000, - Pid: 5678, - }, - } - 
tc.ProcessMonitor.EXPECT().GetAllProcessesMetrics().Return(expectedMetrics) - - req := httptest.NewRequest("GET", "/api/v1/metrics", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result map[string]*domain.ProcessMonitorMetrics - if err := json.Unmarshal(body, &result); err != nil { - t.Fatalf("Failed to unmarshal response: %v", err) - } - - if len(result) != 2 { - t.Errorf("Expected 2 metrics, got %d", len(result)) - } - if result["start"].Cpu != 25.5 { - t.Errorf("Expected CPU 25.5, got %f", result["start"].Cpu) - } -} - -func TestScrollMetricHandler_Metrics_Empty(t *testing.T) { - tc := setupScrollMetricTestApp(t) - defer tc.Ctrl.Finish() - - tc.ProcessMonitor.EXPECT().GetAllProcessesMetrics().Return(map[string]*domain.ProcessMonitorMetrics{}) - - req := httptest.NewRequest("GET", "/api/v1/metrics", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } -} - -func TestScrollMetricHandler_Metrics_Nil(t *testing.T) { - tc := setupScrollMetricTestApp(t) - defer tc.Ctrl.Finish() - - tc.ProcessMonitor.EXPECT().GetAllProcessesMetrics().Return(nil) - - req := httptest.NewRequest("GET", "/api/v1/metrics", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } -} - -func TestScrollMetricHandler_PsTree_Success(t *testing.T) { - tc := setupScrollMetricTestApp(t) - defer tc.Ctrl.Finish() - - expectedPsTree := map[string]*domain.ProcessTreeRoot{ - "start": { - TotalProcessCount: 5, - TotalCpuPercent: 25.0, - }, - 
} - tc.ProcessMonitor.EXPECT().GetPsTrees().Return(expectedPsTree) - - req := httptest.NewRequest("GET", "/api/v1/pstree", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result map[string]*domain.ProcessTreeRoot - if err := json.Unmarshal(body, &result); err != nil { - t.Fatalf("Failed to unmarshal response: %v", err) - } - - if len(result) != 1 { - t.Errorf("Expected 1 ps tree entry, got %d", len(result)) - } - if result["start"].TotalProcessCount != 5 { - t.Errorf("Expected TotalProcessCount 5, got %d", result["start"].TotalProcessCount) - } -} - -func TestScrollMetricHandler_PsTree_Empty(t *testing.T) { - tc := setupScrollMetricTestApp(t) - defer tc.Ctrl.Finish() - - tc.ProcessMonitor.EXPECT().GetPsTrees().Return(map[string]*domain.ProcessTreeRoot{}) - - req := httptest.NewRequest("GET", "/api/v1/pstree", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } -} - -func TestScrollMetricHandler_PsTree_Nil(t *testing.T) { - tc := setupScrollMetricTestApp(t) - defer tc.Ctrl.Finish() - - tc.ProcessMonitor.EXPECT().GetPsTrees().Return(nil) - - req := httptest.NewRequest("GET", "/api/v1/pstree", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } -} - -func TestScrollMetricHandler_PsTree_MultipleProcesses(t *testing.T) { - tc := setupScrollMetricTestApp(t) - defer tc.Ctrl.Finish() - - expectedPsTree := map[string]*domain.ProcessTreeRoot{ - "start": { - TotalProcessCount: 3, - TotalCpuPercent: 15.0, - }, 
- "worker": { - TotalProcessCount: 2, - TotalCpuPercent: 10.0, - }, - } - tc.ProcessMonitor.EXPECT().GetPsTrees().Return(expectedPsTree) - - req := httptest.NewRequest("GET", "/api/v1/pstree", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result map[string]*domain.ProcessTreeRoot - json.Unmarshal(body, &result) - - if len(result) != 2 { - t.Errorf("Expected 2 ps tree entries, got %d", len(result)) - } -} diff --git a/internal/handler/watch_handler.go b/internal/handler/watch_handler.go deleted file mode 100644 index 9330228d..00000000 --- a/internal/handler/watch_handler.go +++ /dev/null @@ -1,233 +0,0 @@ -package handler - -import ( - "encoding/json" - "time" - - "github.com/gofiber/contrib/websocket" - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/api" - "github.com/highcard-dev/daemon/internal/core/ports" - "github.com/highcard-dev/daemon/internal/utils/logger" - "go.uber.org/zap" -) - -type WatchHandler struct { - uiWatchService ports.WatchServiceInterface - scrollService ports.ScrollServiceInterface -} - -func NewWatchHandler(uiWatchService ports.WatchServiceInterface, scrollService ports.ScrollServiceInterface) *WatchHandler { - return &WatchHandler{ - uiWatchService: uiWatchService, - scrollService: scrollService, - } -} - -func (udh *WatchHandler) EnableWatch(c *fiber.Ctx) error { - if udh.uiWatchService.IsWatching() { - response := api.WatchModeResponse{ - Status: "already-active", - Enabled: true, - } - c.Status(fiber.StatusPreconditionFailed) - return c.JSON(response) - } - - // Get current scroll to determine watch paths - scrollDir := udh.scrollService.GetDir() - if scrollDir == "" { - logger.Log().Error("Cannot enable development mode: No scroll loaded") - errorResponse := api.ErrorResponse{ - Status: 
"error", - Error: "No scroll loaded. Please load a scroll before enabling development mode.", - } - return c.Status(400).JSON(errorResponse) - } - - var requestBody api.WatchModeRequest - - err := c.BodyParser(&requestBody) - if err == nil && requestBody.HotReloadCommands != nil { - err = udh.uiWatchService.SetHotReloadCommands(*requestBody.HotReloadCommands) - if err != nil { - logger.Log().Error("Invalid hot reload commands", zap.Error(err)) - errorResponse := api.ErrorResponse{ - Status: "error", - Error: err.Error(), - } - return c.Status(400).JSON(errorResponse) - } - } - - watchPaths := requestBody.WatchPaths - - if len(watchPaths) == 0 { - return c.Status(400).JSON(api.ErrorResponse{ - Status: "error", - Error: "At least one watch path must be specified", - }) - } - - // Start file watching with scroll directory as base path - err = udh.uiWatchService.StartWatching(scrollDir, watchPaths...) - if err != nil { - logger.Log().Error("Failed to start file watcher", zap.Error(err)) - errorResponse := api.ErrorResponse{ - Status: "error", - Error: err.Error(), - } - return c.Status(500).JSON(errorResponse) - } - - logger.Log().Info("UI development mode enabled") - - response := api.WatchModeResponse{ - Status: "success", - Enabled: udh.uiWatchService.IsWatching(), - } - return c.JSON(response) -} - -func (udh *WatchHandler) DisableWatch(c *fiber.Ctx) error { - if !udh.uiWatchService.IsWatching() { - response := api.WatchModeResponse{ - Status: "success", - Enabled: false, - } - return c.JSON(response) - } - - // Stop file watching - err := udh.uiWatchService.StopWatching() - if err != nil { - logger.Log().Error("Failed to stop file watcher", zap.Error(err)) - errorResponse := api.ErrorResponse{ - Status: "error", - Error: err.Error(), - } - return c.Status(500).JSON(errorResponse) - } - - logger.Log().Info("UI development mode disabled") - - response := api.WatchModeResponse{ - Status: "success", - Enabled: udh.uiWatchService.IsWatching(), - } - return 
c.JSON(response) -} - -func (udh *WatchHandler) GetWatchStatus(c *fiber.Ctx) error { - isWatching := udh.uiWatchService.IsWatching() - response := api.WatchStatusResponse{ - Enabled: isWatching, - WatchedPaths: udh.uiWatchService.GetWatchedPaths(), - } - return c.JSON(response) -} - -// NotifyChange handles WebSocket connections for real-time file change notifications -func (udh *WatchHandler) NotifyChange(c *websocket.Conn) { - defer c.Close() - - // Check if development mode is enabled - if !udh.uiWatchService.IsWatching() { - logger.Log().Warn("WebSocket connection attempted but development mode is not enabled") - c.WriteJSON(map[string]interface{}{ - "type": "error", - "message": "Watchelopment mode is not enabled", - }) - return - } - - // Subscribe to file change notifications - changesChan := udh.uiWatchService.Subscribe() - if changesChan == nil { - logger.Log().Error("Failed to subscribe to file changes") - c.WriteJSON(map[string]interface{}{ - "type": "error", - "message": "Failed to subscribe to file changes", - }) - return - } - defer udh.uiWatchService.Unsubscribe(changesChan) - - // Set up ping/pong - c.SetReadDeadline(time.Now().Add(60 * time.Second)) - c.SetPongHandler(func(string) error { - c.SetReadDeadline(time.Now().Add(60 * time.Second)) - return nil - }) - - // Send initial connection message - c.SetWriteDeadline(time.Now().Add(10 * time.Second)) - if err := c.WriteJSON(map[string]interface{}{ - "type": "connected", - "message": "Connected to file watcher", - "watchedPaths": udh.uiWatchService.GetWatchedPaths(), - "timestamp": time.Now(), - }); err != nil { - logger.Log().Debug("Failed to send initial message, client disconnected", zap.Error(err)) - return - } - - logger.Log().Info("WebSocket client connected for file change notifications") - - // Create ping ticker - pingTicker := time.NewTicker(54 * time.Second) - defer pingTicker.Stop() - - // Start reader goroutine to detect disconnects - done := make(chan struct{}) - go func() { - defer 
close(done) - for { - _, _, err := c.ReadMessage() - if err != nil { - logger.Log().Debug("WebSocket client disconnected", zap.Error(err)) - return - } - } - }() - - // Main event loop - for { - select { - case <-done: - return - - case data := <-changesChan: - if data == nil { - return - } - - // Parse and send file change event - var fileEvent map[string]interface{} - if err := json.Unmarshal(*data, &fileEvent); err != nil { - logger.Log().Error("Failed to parse file change event", zap.Error(err)) - continue - } - - c.SetWriteDeadline(time.Now().Add(10 * time.Second)) - if err := c.WriteJSON(map[string]interface{}{ - "type": "file_change", - "data": fileEvent, - "timestamp": time.Now(), - }); err != nil { - logger.Log().Debug("Failed to send file change, client disconnected", zap.Error(err)) - return - } - - case <-pingTicker.C: - c.SetWriteDeadline(time.Now().Add(10 * time.Second)) - if err := c.WriteMessage(websocket.PingMessage, nil); err != nil { - logger.Log().Debug("Failed to send ping, client disconnected", zap.Error(err)) - return - } - } - } -} - -// Ensure WatchHandler implements WatchHandlerInterface at compile time -var _ ports.WatchHandlerInterface = (*WatchHandler)(nil) diff --git a/internal/handler/watch_handler_test.go b/internal/handler/watch_handler_test.go deleted file mode 100644 index 029547cf..00000000 --- a/internal/handler/watch_handler_test.go +++ /dev/null @@ -1,362 +0,0 @@ -package handler - -import ( - "bytes" - "encoding/json" - "io" - "net/http/httptest" - "testing" - - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/api" - mock_ports "github.com/highcard-dev/daemon/test/mock" - "go.uber.org/mock/gomock" -) - -// WatchTestContext holds all mocked services for watch handler testing -type WatchTestContext struct { - App *fiber.App - Ctrl *gomock.Controller - WatchService *mock_ports.MockWatchServiceInterface - ScrollService *mock_ports.MockScrollServiceInterface - Handler *WatchHandler -} - -// 
setupWatchTestApp creates a Fiber app with mocked dependencies for testing -func setupWatchTestApp(t *testing.T) *WatchTestContext { - ctrl := gomock.NewController(t) - - watchService := mock_ports.NewMockWatchServiceInterface(ctrl) - scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) - - handler := NewWatchHandler(watchService, scrollService) - - app := fiber.New() - app.Post("/api/v1/watch/enable", handler.EnableWatch) - app.Post("/api/v1/watch/disable", handler.DisableWatch) - app.Get("/api/v1/watch/status", handler.GetWatchStatus) - - return &WatchTestContext{ - App: app, - Ctrl: ctrl, - WatchService: watchService, - ScrollService: scrollService, - Handler: handler, - } -} - -// ============================================================================ -// POST /api/v1/watch/enable Tests -// ============================================================================ - -func TestWatchHandler_Enable_Success(t *testing.T) { - tc := setupWatchTestApp(t) - defer tc.Ctrl.Finish() - - tc.WatchService.EXPECT().IsWatching().Return(false) - tc.ScrollService.EXPECT().GetDir().Return("/path/to/scroll") - tc.WatchService.EXPECT().StartWatching("/path/to/scroll", "src", "config").Return(nil) - tc.WatchService.EXPECT().IsWatching().Return(true) - - requestBody := api.WatchModeRequest{ - WatchPaths: []string{"src", "config"}, - } - bodyBytes, _ := json.Marshal(requestBody) - - req := httptest.NewRequest("POST", "/api/v1/watch/enable", bytes.NewReader(bodyBytes)) - req.Header.Set("Content-Type", "application/json") - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - body, _ := io.ReadAll(resp.Body) - t.Errorf("Expected status 200, got %d, body: %s", resp.StatusCode, string(body)) - } - - body, _ := io.ReadAll(resp.Body) - var result api.WatchModeResponse - json.Unmarshal(body, &result) - - if result.Status != "success" { - t.Errorf("Expected status 
'success', got '%s'", result.Status) - } - if !result.Enabled { - t.Error("Expected enabled to be true") - } -} - -func TestWatchHandler_Enable_AlreadyActive(t *testing.T) { - tc := setupWatchTestApp(t) - defer tc.Ctrl.Finish() - - tc.WatchService.EXPECT().IsWatching().Return(true) - - req := httptest.NewRequest("POST", "/api/v1/watch/enable", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 412 { - t.Errorf("Expected status 412, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result api.WatchModeResponse - json.Unmarshal(body, &result) - - if result.Status != "already-active" { - t.Errorf("Expected status 'already-active', got '%s'", result.Status) - } -} - -func TestWatchHandler_Enable_NoScrollLoaded(t *testing.T) { - tc := setupWatchTestApp(t) - defer tc.Ctrl.Finish() - - tc.WatchService.EXPECT().IsWatching().Return(false) - tc.ScrollService.EXPECT().GetDir().Return("") - - req := httptest.NewRequest("POST", "/api/v1/watch/enable", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 400 { - t.Errorf("Expected status 400, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result api.ErrorResponse - json.Unmarshal(body, &result) - - if result.Status != "error" { - t.Errorf("Expected status 'error', got '%s'", result.Status) - } -} - -func TestWatchHandler_Enable_StartWatchingError(t *testing.T) { - tc := setupWatchTestApp(t) - defer tc.Ctrl.Finish() - - tc.WatchService.EXPECT().IsWatching().Return(false) - tc.ScrollService.EXPECT().GetDir().Return("/path/to/scroll") - tc.WatchService.EXPECT().StartWatching("/path/to/scroll", "src").Return(fiber.NewError(500, "watcher error")) - - requestBody := api.WatchModeRequest{ - WatchPaths: []string{"src"}, - } - bodyBytes, _ := json.Marshal(requestBody) - - req 
:= httptest.NewRequest("POST", "/api/v1/watch/enable", bytes.NewReader(bodyBytes)) - req.Header.Set("Content-Type", "application/json") - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 500 { - t.Errorf("Expected status 500, got %d", resp.StatusCode) - } -} - -func TestWatchHandler_Enable_WithCommands(t *testing.T) { - tc := setupWatchTestApp(t) - defer tc.Ctrl.Finish() - - tc.WatchService.EXPECT().IsWatching().Return(false) - tc.ScrollService.EXPECT().GetDir().Return("/path/to/scroll") - tc.WatchService.EXPECT().SetHotReloadCommands([]string{"npm run dev"}) - tc.WatchService.EXPECT().StartWatching("/path/to/scroll", "src", "lib").Return(nil) - tc.WatchService.EXPECT().IsWatching().Return(true) - - hotReloadCmds := []string{"npm run dev"} - requestBody := api.WatchModeRequest{ - HotReloadCommands: &hotReloadCmds, - WatchPaths: []string{"src", "lib"}, - } - bodyBytes, _ := json.Marshal(requestBody) - - req := httptest.NewRequest("POST", "/api/v1/watch/enable", bytes.NewReader(bodyBytes)) - req.Header.Set("Content-Type", "application/json") - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } -} - -// ============================================================================ -// POST /api/v1/watch/disable Tests -// ============================================================================ - -func TestWatchHandler_Disable_Success(t *testing.T) { - tc := setupWatchTestApp(t) - defer tc.Ctrl.Finish() - - tc.WatchService.EXPECT().IsWatching().Return(true) - tc.WatchService.EXPECT().StopWatching().Return(nil) - tc.WatchService.EXPECT().IsWatching().Return(false) - - req := httptest.NewRequest("POST", "/api/v1/watch/disable", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed 
to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result api.WatchModeResponse - json.Unmarshal(body, &result) - - if result.Status != "success" { - t.Errorf("Expected status 'success', got '%s'", result.Status) - } - if result.Enabled { - t.Error("Expected enabled to be false") - } -} - -func TestWatchHandler_Disable_NotWatching(t *testing.T) { - tc := setupWatchTestApp(t) - defer tc.Ctrl.Finish() - - tc.WatchService.EXPECT().IsWatching().Return(false) - - req := httptest.NewRequest("POST", "/api/v1/watch/disable", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result api.WatchModeResponse - json.Unmarshal(body, &result) - - if result.Status != "success" { - t.Errorf("Expected status 'success', got '%s'", result.Status) - } -} - -func TestWatchHandler_Disable_StopWatchingError(t *testing.T) { - tc := setupWatchTestApp(t) - defer tc.Ctrl.Finish() - - tc.WatchService.EXPECT().IsWatching().Return(true) - tc.WatchService.EXPECT().StopWatching().Return(fiber.NewError(500, "stop error")) - - req := httptest.NewRequest("POST", "/api/v1/watch/disable", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 500 { - t.Errorf("Expected status 500, got %d", resp.StatusCode) - } -} - -// ============================================================================ -// GET /api/v1/watch/status Tests -// ============================================================================ - -func TestWatchHandler_Status_Enabled(t *testing.T) { - tc := setupWatchTestApp(t) - defer tc.Ctrl.Finish() - - watchedPaths := 
[]string{"/path/to/public/src", "/path/to/private/src"} - tc.WatchService.EXPECT().IsWatching().Return(true) - tc.WatchService.EXPECT().GetWatchedPaths().Return(watchedPaths) - - req := httptest.NewRequest("GET", "/api/v1/watch/status", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result api.WatchStatusResponse - json.Unmarshal(body, &result) - - if !result.Enabled { - t.Error("Expected enabled to be true") - } - if len(result.WatchedPaths) != 2 { - t.Errorf("Expected 2 watched paths, got %d", len(result.WatchedPaths)) - } -} - -func TestWatchHandler_Status_Disabled(t *testing.T) { - tc := setupWatchTestApp(t) - defer tc.Ctrl.Finish() - - tc.WatchService.EXPECT().IsWatching().Return(false) - tc.WatchService.EXPECT().GetWatchedPaths().Return([]string{}) - - req := httptest.NewRequest("GET", "/api/v1/watch/status", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result api.WatchStatusResponse - json.Unmarshal(body, &result) - - if result.Enabled { - t.Error("Expected enabled to be false") - } - if len(result.WatchedPaths) != 0 { - t.Errorf("Expected 0 watched paths, got %d", len(result.WatchedPaths)) - } -} - -func TestWatchHandler_Status_NilPaths(t *testing.T) { - tc := setupWatchTestApp(t) - defer tc.Ctrl.Finish() - - tc.WatchService.EXPECT().IsWatching().Return(true) - tc.WatchService.EXPECT().GetWatchedPaths().Return(nil) - - req := httptest.NewRequest("GET", "/api/v1/watch/status", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if 
resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } -} diff --git a/internal/handler/websocket_handler.go b/internal/handler/websocket_handler.go deleted file mode 100644 index f462e86f..00000000 --- a/internal/handler/websocket_handler.go +++ /dev/null @@ -1,142 +0,0 @@ -package handler - -import ( - "time" - - "github.com/gofiber/contrib/websocket" - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/api" - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/core/ports" - "github.com/highcard-dev/daemon/internal/utils/logger" - "go.uber.org/zap" -) - -type WebsocketHandler struct { - authorizerService ports.AuthorizerServiceInterface - scrollService ports.ScrollServiceInterface - consoleService ports.ConsoleManagerInterface -} - -func domainConsoleToAPI(dc *domain.Console) api.Console { - return api.Console{ - Type: api.ConsoleType(dc.Type), - InputMode: dc.InputMode, - Exit: dc.Exit, - } -} - -const ( - // Time allowed to write a message to the peer. - writeWait = 10 * time.Second - - // Time allowed to read the next pong message from the peer. - pongWait = 60 * time.Second - - // Send pings to peer with this period. Must be less than pongWait. - pingPeriod = (pongWait * 9) / 10 - - // Maximum message size allowed from peer. 
- maxMessageSize = 512 -) - -func NewWebsocketHandler( - authorizerService ports.AuthorizerServiceInterface, - scrollService ports.ScrollServiceInterface, - consoleService ports.ConsoleManagerInterface, -) *WebsocketHandler { - return &WebsocketHandler{ - authorizerService, - scrollService, - consoleService, - } -} - -func (ah WebsocketHandler) CreateToken(c *fiber.Ctx) error { - token := ah.authorizerService.GenerateQueryToken() - - c.JSON(api.TokenResponse{Token: token}) - return nil -} - -func (ah WebsocketHandler) GetConsoles(c *fiber.Ctx) error { - consoles := ah.consoleService.GetConsoles() - - // Convert domain consoles to API consoles - apiConsoles := make(map[string]api.Console, len(consoles)) - for k, v := range consoles { - apiConsoles[k] = domainConsoleToAPI(v) - } - - c.JSON(api.ConsolesResponse{Consoles: apiConsoles}) - return nil -} - -func (wh WebsocketHandler) HandleProcess(c *websocket.Conn) { - param := c.Params("console") - defer c.Close() - - // Get console channel - channel := wh.consoleService.GetConsole(param) - if channel == nil { - logger.Log().Warn("Console not found", zap.String("console", param)) - return - } - - // Subscribe to console output - subscriptionChannel := channel.Channel.Subscribe() - defer channel.Channel.Unsubscribe(subscriptionChannel) - - // Set up ping/pong - c.SetReadLimit(maxMessageSize) - c.SetReadDeadline(time.Now().Add(pongWait)) - c.SetPongHandler(func(string) error { - c.SetReadDeadline(time.Now().Add(pongWait)) - return nil - }) - - logger.Log().Info("WebSocket client connected to console", zap.String("console", param)) - - // Create ping ticker - pingTicker := time.NewTicker(pingPeriod) - defer pingTicker.Stop() - - // Start reader goroutine to detect disconnects - done := make(chan struct{}) - go func() { - defer close(done) - for { - _, _, err := c.ReadMessage() - if err != nil { - logger.Log().Debug("WebSocket client disconnected", zap.Error(err)) - return - } - } - }() - - // Main event loop - for { - 
select { - case <-done: - return - - case buffer, ok := <-subscriptionChannel: - if buffer == nil || !ok { - return - } - - c.SetWriteDeadline(time.Now().Add(writeWait)) - if err := c.WriteMessage(websocket.TextMessage, *buffer); err != nil { - logger.Log().Debug("Failed to send console output, client disconnected", zap.Error(err)) - return - } - - case <-pingTicker.C: - c.SetWriteDeadline(time.Now().Add(writeWait)) - if err := c.WriteMessage(websocket.PingMessage, nil); err != nil { - logger.Log().Debug("Failed to send ping, client disconnected", zap.Error(err)) - return - } - } - } -} diff --git a/internal/handler/websocket_handler_test.go b/internal/handler/websocket_handler_test.go deleted file mode 100644 index 724c99a3..00000000 --- a/internal/handler/websocket_handler_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package handler - -import ( - "encoding/json" - "io" - "net/http/httptest" - "testing" - - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/api" - "github.com/highcard-dev/daemon/internal/core/domain" - mock_ports "github.com/highcard-dev/daemon/test/mock" - "go.uber.org/mock/gomock" -) - -// WebsocketTestContext holds all mocked services for websocket handler testing -type WebsocketTestContext struct { - App *fiber.App - Ctrl *gomock.Controller - AuthorizerService *mock_ports.MockAuthorizerServiceInterface - ScrollService *mock_ports.MockScrollServiceInterface - ConsoleService *mock_ports.MockConsoleManagerInterface - Handler *WebsocketHandler -} - -// setupWebsocketTestApp creates a Fiber app with mocked dependencies for testing -func setupWebsocketTestApp(t *testing.T) *WebsocketTestContext { - ctrl := gomock.NewController(t) - - authorizerService := mock_ports.NewMockAuthorizerServiceInterface(ctrl) - scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) - consoleService := mock_ports.NewMockConsoleManagerInterface(ctrl) - - handler := NewWebsocketHandler(authorizerService, scrollService, consoleService) - - app := 
fiber.New() - app.Get("/api/v1/token", handler.CreateToken) - app.Get("/api/v1/consoles", handler.GetConsoles) - - return &WebsocketTestContext{ - App: app, - Ctrl: ctrl, - AuthorizerService: authorizerService, - ScrollService: scrollService, - ConsoleService: consoleService, - Handler: handler, - } -} - -func TestWebsocketHandler_CreateToken_Success(t *testing.T) { - tc := setupWebsocketTestApp(t) - defer tc.Ctrl.Finish() - - expectedToken := "test-token-12345" - tc.AuthorizerService.EXPECT().GenerateQueryToken().Return(expectedToken) - - req := httptest.NewRequest("GET", "/api/v1/token", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result api.TokenResponse - if err := json.Unmarshal(body, &result); err != nil { - t.Fatalf("Failed to unmarshal response: %v", err) - } - - if result.Token != expectedToken { - t.Errorf("Expected token '%s', got '%s'", expectedToken, result.Token) - } -} - -func TestWebsocketHandler_CreateToken_EmptyToken(t *testing.T) { - tc := setupWebsocketTestApp(t) - defer tc.Ctrl.Finish() - - tc.AuthorizerService.EXPECT().GenerateQueryToken().Return("") - - req := httptest.NewRequest("GET", "/api/v1/token", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result api.TokenResponse - json.Unmarshal(body, &result) - - if result.Token != "" { - t.Errorf("Expected empty token, got '%s'", result.Token) - } -} - -func TestWebsocketHandler_Consoles_Success(t *testing.T) { - tc := setupWebsocketTestApp(t) - defer tc.Ctrl.Finish() - - expectedConsoles := map[string]*domain.Console{ - "start.0": { - InputMode: 
"stdin", - }, - "worker.0": { - InputMode: "rcon", - }, - } - tc.ConsoleService.EXPECT().GetConsoles().Return(expectedConsoles) - - req := httptest.NewRequest("GET", "/api/v1/consoles", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result api.ConsolesResponse - if err := json.Unmarshal(body, &result); err != nil { - t.Fatalf("Failed to unmarshal response: %v", err) - } - - if len(result.Consoles) != 2 { - t.Errorf("Expected 2 consoles, got %d", len(result.Consoles)) - } - if _, ok := result.Consoles["start.0"]; !ok { - t.Error("Expected 'start.0' console to be present") - } -} - -func TestWebsocketHandler_Consoles_Empty(t *testing.T) { - tc := setupWebsocketTestApp(t) - defer tc.Ctrl.Finish() - - tc.ConsoleService.EXPECT().GetConsoles().Return(map[string]*domain.Console{}) - - req := httptest.NewRequest("GET", "/api/v1/consoles", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result api.ConsolesResponse - json.Unmarshal(body, &result) - - if len(result.Consoles) != 0 { - t.Errorf("Expected 0 consoles, got %d", len(result.Consoles)) - } -} - -func TestWebsocketHandler_Consoles_Nil(t *testing.T) { - tc := setupWebsocketTestApp(t) - defer tc.Ctrl.Finish() - - tc.ConsoleService.EXPECT().GetConsoles().Return(nil) - - req := httptest.NewRequest("GET", "/api/v1/consoles", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } -} - -func 
TestWebsocketHandler_Consoles_SingleConsole(t *testing.T) { - tc := setupWebsocketTestApp(t) - defer tc.Ctrl.Finish() - - expectedConsoles := map[string]*domain.Console{ - "main.0": { - InputMode: "stdin", - }, - } - tc.ConsoleService.EXPECT().GetConsoles().Return(expectedConsoles) - - req := httptest.NewRequest("GET", "/api/v1/consoles", nil) - resp, err := tc.App.Test(req) - if err != nil { - t.Fatalf("Failed to execute request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Errorf("Expected status 200, got %d", resp.StatusCode) - } - - body, _ := io.ReadAll(resp.Body) - var result api.ConsolesResponse - json.Unmarshal(body, &result) - - if len(result.Consoles) != 1 { - t.Errorf("Expected 1 console, got %d", len(result.Consoles)) - } - if result.Consoles["main.0"].InputMode != "stdin" { - t.Errorf("Expected input mode 'stdin', got '%s'", result.Consoles["main.0"].InputMode) - } -} diff --git a/internal/runtime/backend.go b/internal/runtime/backend.go new file mode 100644 index 00000000..b3de79f8 --- /dev/null +++ b/internal/runtime/backend.go @@ -0,0 +1,36 @@ +package runtime + +import ( + "fmt" + + "github.com/highcard-dev/daemon/internal/core/ports" + "github.com/highcard-dev/daemon/internal/runtime/docker" + runtimekubernetes "github.com/highcard-dev/daemon/internal/runtime/kubernetes" +) + +type Options struct { + Kubernetes runtimekubernetes.Config +} + +type Option func(*Options) + +func WithKubernetesConfig(config runtimekubernetes.Config) Option { + return func(options *Options) { + options.Kubernetes = config + } +} + +func NewBackend(name string, consoleManager ports.ConsoleManagerInterface, opts ...Option) (ports.RuntimeBackendInterface, error) { + options := Options{} + for _, opt := range opts { + opt(&options) + } + switch name { + case "", "docker": + return docker.New(consoleManager) + case "kubernetes": + return runtimekubernetes.New(options.Kubernetes, consoleManager) + default: + return nil, fmt.Errorf("unknown 
runtime backend %q", name) + } +} diff --git a/internal/runtime/docker/backend.go b/internal/runtime/docker/backend.go new file mode 100644 index 00000000..cd39de27 --- /dev/null +++ b/internal/runtime/docker/backend.go @@ -0,0 +1,689 @@ +package docker + +import ( + "context" + "crypto/sha1" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/go-connections/nat" + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/ports" +) + +type Backend struct { + client *client.Client + consoleManager ports.ConsoleManagerInterface + mu sync.Mutex + containers map[string]string + stdin map[string]io.Writer +} + +func New(consoleManager ports.ConsoleManagerInterface) (*Backend, error) { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return nil, err + } + return &Backend{ + client: cli, + consoleManager: consoleManager, + containers: map[string]string{}, + stdin: map[string]io.Writer{}, + }, nil +} + +func (b *Backend) Name() string { + return "docker" +} + +func (b *Backend) ReadScrollFile(scrollRoot string) ([]byte, error) { + if scrollRoot == "" { + return nil, fmt.Errorf("scroll root is required") + } + return os.ReadFile(filepath.Join(scrollRoot, "scroll.yaml")) +} + +func (b *Backend) RunCommand(command ports.RuntimeCommand) (*int, error) { + for idx, procedure := range command.Command.Procedures { + procedureName := commandProcedureName(command.Name, idx, procedure) + if command.Command.Run == domain.RunModePersistent { + if procedure.IsSignal() { + if err := b.Signal(procedureName, procedure.Target, procedure.Signal, command.DataRoot); err != nil { + return nil, err + } + 
continue + } + if procedure.Image == "" { + return nil, fmt.Errorf("docker runtime procedure %s requires image", procedureName) + } + if err := b.startPersistentContainer(procedureName, procedure, command.DataRoot, command.GlobalPorts); err != nil { + return nil, err + } + continue + } + exitCode, err := b.runProcedure(procedureName, procedure, command.DataRoot, command.GlobalPorts) + if err != nil { + return exitCode, err + } + if exitCode != nil && *exitCode != 0 { + if procedure.IgnoreFailure { + continue + } + return exitCode, nil + } + } + return nil, nil +} + +func (b *Backend) runProcedure(procedureName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port) (*int, error) { + if procedure.IsSignal() { + return nil, b.Signal(procedureName, procedure.Target, procedure.Signal, dataRoot) + } + if procedure.Image == "" { + return nil, fmt.Errorf("docker runtime procedure %s requires image", procedureName) + } + return b.runContainer(procedureName, procedure, dataRoot, globalPorts) +} + +func (b *Backend) ExpectedPorts(dataRoot string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimePortStatus, error) { + statuses := []domain.RuntimePortStatus{} + portsByName := portsByName(globalPorts) + for commandName, command := range commands { + if command == nil { + continue + } + for idx, procedure := range command.Procedures { + if procedure == nil || len(procedure.ExpectedPorts) == 0 { + continue + } + procedureName := fmt.Sprintf("%s.%d", commandName, idx) + if procedure.Id != nil { + procedureName = *procedure.Id + } + containerStatuses, err := b.expectedPortsForProcedure(dataRoot, procedureName, procedure, portsByName) + if err != nil { + return nil, err + } + statuses = append(statuses, containerStatuses...) 
+ } + } + sort.Slice(statuses, func(i, j int) bool { + if statuses[i].Procedure == statuses[j].Procedure { + return statuses[i].Name < statuses[j].Name + } + return statuses[i].Procedure < statuses[j].Procedure + }) + return statuses, nil +} + +func (b *Backend) Attach(commandName string, data string) error { + b.mu.Lock() + stdin := b.stdin[commandName] + b.mu.Unlock() + if stdin == nil { + return fmt.Errorf("target container %s not attached", commandName) + } + _, err := stdin.Write([]byte(data)) + return err +} + +func (b *Backend) Signal(_ string, target string, signal string, dataRoot string) error { + if target == "" { + return nil + } + ctx := context.Background() + containerID := b.containerID(target, dataRoot) + options := container.StopOptions{} + if signal != "" { + options.Signal = signal + } + return b.client.ContainerStop(ctx, containerID, options) +} + +func (b *Backend) runContainer(commandName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port) (*int, error) { + ctx := context.Background() + if err := os.MkdirAll(filepath.Join(dataRoot, domain.RuntimeDataDir), 0755); err != nil { + return nil, err + } + if procedure.Image == "" { + return nil, errors.New("docker image is required") + } + + if err := b.pullImage(ctx, procedure.Image); err != nil { + return nil, err + } + + config, hostConfig, err := containerSpec(commandName, procedure, dataRoot, globalPorts) + if err != nil { + return nil, err + } + containerName := ContainerName(dataRoot, commandName) + _ = b.client.ContainerRemove(ctx, containerName, container.RemoveOptions{Force: true}) + + created, err := b.client.ContainerCreate(ctx, config, hostConfig, nil, nil, containerName) + if err != nil { + return nil, err + } + b.setContainer(commandName, created.ID) + defer func() { + b.clearContainer(commandName) + _ = b.client.ContainerRemove(context.Background(), created.ID, container.RemoveOptions{Force: true}) + }() + + attach, err := b.client.ContainerAttach(ctx, 
created.ID, container.AttachOptions{ + Stream: true, + Stdin: true, + Stdout: true, + Stderr: true, + }) + if err != nil { + return nil, err + } + defer attach.Close() + b.setStdin(commandName, attach.Conn) + defer b.clearStdin(commandName) + + combined := make(chan string, 20) + consoleType := domain.ConsoleTypeContainer + if procedure.TTY { + consoleType = domain.ConsoleTypeTTY + } + console, doneChan := b.consoleManager.AddConsoleWithChannel(commandName, consoleType, "stdin", combined) + console.WriteInput = func(data string) error { + return b.Attach(commandName, data) + } + + var copyWG sync.WaitGroup + copyWG.Add(1) + go func() { + defer copyWG.Done() + defer close(combined) + writer := channelWriter{channel: combined} + if procedure.TTY { + _, _ = io.Copy(writer, attach.Reader) + return + } + _, _ = stdcopy.StdCopy(writer, writer, attach.Reader) + }() + + if err := b.client.ContainerStart(ctx, created.ID, container.StartOptions{}); err != nil { + return nil, err + } + + statusCh, errCh := b.client.ContainerWait(ctx, created.ID, container.WaitConditionNotRunning) + var exitCode int + select { + case waitErr := <-errCh: + if waitErr != nil { + return nil, waitErr + } + case status := <-statusCh: + exitCode = int(status.StatusCode) + } + _ = attach.CloseWrite() + copyWG.Wait() + console.MarkExited(exitCode) + <-doneChan + return &exitCode, nil +} + +func (b *Backend) startPersistentContainer(commandName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port) error { + ctx := context.Background() + if err := os.MkdirAll(filepath.Join(dataRoot, domain.RuntimeDataDir), 0755); err != nil { + return err + } + if procedure.Image == "" { + return errors.New("docker image is required") + } + if err := b.pullImage(ctx, procedure.Image); err != nil { + return err + } + config, hostConfig, err := containerSpec(commandName, procedure, dataRoot, globalPorts) + if err != nil { + return err + } + containerName := ContainerName(dataRoot, commandName) + 
_ = b.client.ContainerRemove(ctx, containerName, container.RemoveOptions{Force: true}) + created, err := b.client.ContainerCreate(ctx, config, hostConfig, nil, nil, containerName) + if err != nil { + return err + } + attach, err := b.client.ContainerAttach(ctx, created.ID, container.AttachOptions{ + Stream: true, + Stdin: true, + Stdout: true, + Stderr: true, + }) + if err != nil { + _ = b.client.ContainerRemove(context.Background(), created.ID, container.RemoveOptions{Force: true}) + return err + } + b.setContainer(commandName, created.ID) + b.setStdin(commandName, attach.Conn) + + combined := make(chan string, 20) + consoleType := domain.ConsoleTypeContainer + if procedure.TTY { + consoleType = domain.ConsoleTypeTTY + } + console, _ := b.consoleManager.AddConsoleWithChannel(commandName, consoleType, "stdin", combined) + console.WriteInput = func(data string) error { + return b.Attach(commandName, data) + } + + go func() { + defer close(combined) + defer attach.Close() + writer := channelWriter{channel: combined} + if procedure.TTY { + _, _ = io.Copy(writer, attach.Reader) + return + } + _, _ = stdcopy.StdCopy(writer, writer, attach.Reader) + }() + + if err := b.client.ContainerStart(ctx, created.ID, container.StartOptions{}); err != nil { + attach.Close() + b.clearContainer(commandName) + b.clearStdin(commandName) + _ = b.client.ContainerRemove(context.Background(), created.ID, container.RemoveOptions{Force: true}) + return err + } + + go func() { + statusCh, errCh := b.client.ContainerWait(context.Background(), created.ID, container.WaitConditionNotRunning) + select { + case <-errCh: + case status := <-statusCh: + exitCode := int(status.StatusCode) + console.MarkExited(exitCode) + } + b.clearContainer(commandName) + b.clearStdin(commandName) + }() + return nil +} + +func (b *Backend) pullImage(ctx context.Context, imageRef string) error { + reader, err := b.client.ImagePull(ctx, imageRef, image.PullOptions{}) + if err != nil { + return err + } + defer 
reader.Close() + _, _ = io.Copy(io.Discard, reader) + return nil +} + +func (b *Backend) containerID(commandName string, dataRoot string) string { + b.mu.Lock() + defer b.mu.Unlock() + if id := b.containers[commandName]; id != "" { + return id + } + return ContainerName(dataRoot, commandName) +} + +func (b *Backend) setContainer(commandName string, id string) { + b.mu.Lock() + defer b.mu.Unlock() + b.containers[commandName] = id +} + +func (b *Backend) clearContainer(commandName string) { + b.mu.Lock() + defer b.mu.Unlock() + delete(b.containers, commandName) +} + +func (b *Backend) setStdin(commandName string, stdin io.Writer) { + b.mu.Lock() + defer b.mu.Unlock() + b.stdin[commandName] = stdin +} + +func (b *Backend) clearStdin(commandName string) { + b.mu.Lock() + defer b.mu.Unlock() + delete(b.stdin, commandName) +} + +type channelWriter struct { + channel chan<- string +} + +func (w channelWriter) Write(p []byte) (int, error) { + if len(p) > 0 { + w.channel <- string(p) + } + return len(p), nil +} + +func containerSpec(commandName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port) (*container.Config, *container.HostConfig, error) { + if procedure.Image == "" { + return nil, nil, errors.New("docker image is required") + } + runtimeDataRoot := filepath.Join(dataRoot, domain.RuntimeDataDir) + if err := os.MkdirAll(runtimeDataRoot, 0755); err != nil { + return nil, nil, err + } + + exposedPorts := nat.PortSet{} + portBindings := nat.PortMap{} + for _, expectedPort := range procedure.ExpectedPorts { + port, ok := portsByName(globalPorts)[expectedPort.Name] + if !ok { + return nil, nil, fmt.Errorf("expected port %s is not defined in top-level ports", expectedPort.Name) + } + protocol := port.Protocol + if protocol == "" || protocol == "http" || protocol == "https" { + protocol = "tcp" + } + dockerPort := nat.Port(fmt.Sprintf("%d/%s", port.Port, protocol)) + exposedPorts[dockerPort] = struct{}{} + portBindings[dockerPort] = 
[]nat.PortBinding{{HostPort: fmt.Sprintf("%d", port.Port)}} + } + + binds := []string{} + for _, mount := range procedure.Mounts { + if mount.Path == "" { + return nil, nil, fmt.Errorf("mount path is required") + } + subPath := mount.SubPath + if subPath == "" { + subPath = "." + } + hostPath := filepath.Join(runtimeDataRoot, filepath.FromSlash(subPath)) + if err := os.MkdirAll(hostPath, 0755); err != nil { + return nil, nil, err + } + bind := fmt.Sprintf("%s:%s", hostPath, mount.Path) + if mount.ReadOnly { + bind += ":ro" + } + binds = append(binds, bind) + } + + return &container.Config{ + Image: procedure.Image, + Cmd: procedure.Command, + WorkingDir: procedure.WorkingDir, + Env: envArgs(procedure.Env), + ExposedPorts: exposedPorts, + AttachStdin: true, + AttachStdout: true, + AttachStderr: true, + OpenStdin: true, + Tty: procedure.TTY, + Labels: map[string]string{ + "druid.command": commandName, + }, + }, &container.HostConfig{ + Binds: binds, + PortBindings: portBindings, + }, nil +} + +func ContainerName(scrollRoot string, commandName string) string { + hash := sha1.Sum([]byte(scrollRoot)) + name := sanitizeContainerName(commandName) + return fmt.Sprintf("druid-%s-%s", hex.EncodeToString(hash[:])[:10], name) +} + +func commandProcedureName(commandName string, idx int, procedure *domain.Procedure) string { + procedureName := fmt.Sprintf("%s.%d", commandName, idx) + if procedure != nil && procedure.Id != nil { + procedureName = *procedure.Id + } + return procedureName +} + +func sanitizeContainerName(name string) string { + re := regexp.MustCompile(`[^a-zA-Z0-9_.-]+`) + name = re.ReplaceAllString(name, "-") + name = strings.Trim(name, "-_.") + if name == "" { + return "command" + } + return name +} + +func envArgs(env map[string]string) []string { + if len(env) == 0 { + return nil + } + keys := make([]string, 0, len(env)) + for key := range env { + keys = append(keys, key) + } + sort.Strings(keys) + args := make([]string, 0, len(keys)) + for _, key := range 
keys { + args = append(args, fmt.Sprintf("%s=%s", key, env[key])) + } + return args +} + +type ContainerSpec struct { + Image string + Command []string + WorkingDir string + Env []string + Binds []string + PortBindings nat.PortMap + TTY bool +} + +func BuildContainerSpec(commandName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port) (*ContainerSpec, error) { + config, hostConfig, err := containerSpec(commandName, procedure, dataRoot, globalPorts) + if err != nil { + return nil, err + } + return &ContainerSpec{ + Image: config.Image, + Command: config.Cmd, + WorkingDir: config.WorkingDir, + Env: config.Env, + Binds: hostConfig.Binds, + PortBindings: hostConfig.PortBindings, + TTY: config.Tty, + }, nil +} + +type containerTraffic struct { + containerID string + rxBytes uint64 + txBytes uint64 + lastDeltaRX uint64 + lastActivityAt *time.Time + samples []trafficSample +} + +type trafficSample struct { + at time.Time + rx uint64 + tx uint64 +} + +type trafficStore struct { + mu sync.Mutex + samples map[string][]trafficSample + lastActivityAt map[string]time.Time +} + +var globalTrafficStore = &trafficStore{ + samples: map[string][]trafficSample{}, + lastActivityAt: map[string]time.Time{}, +} + +func (s *trafficStore) record(containerID string, rxBytes uint64, txBytes uint64, now time.Time) containerTraffic { + s.mu.Lock() + defer s.mu.Unlock() + samples := s.samples[containerID] + var lastDeltaRX uint64 + if len(samples) > 0 && rxBytes >= samples[len(samples)-1].rx { + lastDeltaRX = rxBytes - samples[len(samples)-1].rx + if lastDeltaRX > 0 { + s.lastActivityAt[containerID] = now + } + } + samples = append(samples, trafficSample{at: now, rx: rxBytes, tx: txBytes}) + cutoff := now.Add(-24 * time.Hour) + keepFrom := 0 + for keepFrom < len(samples) && samples[keepFrom].at.Before(cutoff) { + keepFrom++ + } + samples = samples[keepFrom:] + s.samples[containerID] = samples + + var lastActivityAt *time.Time + if last, ok := 
s.lastActivityAt[containerID]; ok { + lastCopy := last + lastActivityAt = &lastCopy + } + samplesCopy := append([]trafficSample(nil), samples...) + return containerTraffic{ + containerID: containerID, + rxBytes: rxBytes, + txBytes: txBytes, + lastDeltaRX: lastDeltaRX, + lastActivityAt: lastActivityAt, + samples: samplesCopy, + } +} + +func (t containerTraffic) rxDelta(window time.Duration, now time.Time) uint64 { + if window <= 0 || len(t.samples) == 0 { + return t.lastDeltaRX + } + cutoff := now.Add(-window) + base := t.samples[0] + for _, sample := range t.samples { + if !sample.at.Before(cutoff) { + base = sample + break + } + } + if t.rxBytes < base.rx { + return 0 + } + return t.rxBytes - base.rx +} + +func (b *Backend) expectedPortsForProcedure(dataRoot string, procedureName string, procedure *domain.Procedure, ports map[string]domain.Port) ([]domain.RuntimePortStatus, error) { + statuses := make([]domain.RuntimePortStatus, 0, len(procedure.ExpectedPorts)) + containerName := ContainerName(dataRoot, procedureName) + ctx := context.Background() + inspected, err := b.client.ContainerInspect(ctx, containerName) + containerFound := err == nil + if err != nil && !client.IsErrNotFound(err) { + return nil, err + } + + var traffic *containerTraffic + if containerFound { + if sample, err := b.containerTraffic(ctx, inspected.ID); err == nil { + traffic = sample + } + } + + for _, expectedPort := range procedure.ExpectedPorts { + port, ok := ports[expectedPort.Name] + if !ok { + return nil, fmt.Errorf("expected port %s is not defined in top-level ports", expectedPort.Name) + } + status := domain.RuntimePortStatus{ + Name: expectedPort.Name, + Procedure: procedureName, + Port: port.Port, + Protocol: normalizeProtocol(port.Protocol), + KeepAliveTraffic: expectedPort.KeepAliveTraffic, + Source: "docker-container-stats", + } + if containerFound { + status.Bound, status.HostIP, status.HostPort = dockerPortBinding(inspected.NetworkSettings.Ports, port) + } + if traffic != nil 
{ + rx := traffic.rxBytes + tx := traffic.txBytes + status.RXBytes = &rx + status.TXBytes = &tx + status.LastActivityAt = traffic.lastActivityAt + delta := traffic.lastDeltaRX + if expectedPort.KeepAliveTraffic != "" { + threshold, err := domain.ParseKeepAliveTraffic(expectedPort.KeepAliveTraffic) + if err != nil { + return nil, err + } + delta = traffic.rxDelta(threshold.Window, time.Now()) + trafficOK := delta >= threshold.Bytes + status.TrafficOK = &trafficOK + status.TrafficWindow = threshold.Window.String() + } + status.Traffic = delta > 0 + status.TrafficBytes = &delta + } + statuses = append(statuses, status) + } + return statuses, nil +} + +func (b *Backend) containerTraffic(ctx context.Context, containerID string) (*containerTraffic, error) { + stats, err := b.client.ContainerStats(ctx, containerID, false) + if err != nil { + return nil, err + } + defer stats.Body.Close() + var response container.StatsResponse + if err := json.NewDecoder(stats.Body).Decode(&response); err != nil { + return nil, err + } + var rxBytes uint64 + var txBytes uint64 + for _, network := range response.Networks { + rxBytes += network.RxBytes + txBytes += network.TxBytes + } + traffic := globalTrafficStore.record(containerID, rxBytes, txBytes, time.Now()) + return &traffic, nil +} + +func dockerPortBinding(bindings nat.PortMap, port domain.Port) (bool, string, int) { + dockerPort := nat.Port(fmt.Sprintf("%d/%s", port.Port, normalizeProtocol(port.Protocol))) + portBindings := bindings[dockerPort] + if len(portBindings) == 0 { + return false, "", 0 + } + hostPort, _ := strconv.Atoi(portBindings[0].HostPort) + return true, portBindings[0].HostIP, hostPort +} + +func portsByName(ports []domain.Port) map[string]domain.Port { + result := make(map[string]domain.Port, len(ports)) + for _, port := range ports { + result[port.Name] = port + } + return result +} + +func normalizeProtocol(protocol string) string { + protocol = strings.ToLower(protocol) + if protocol == "" || protocol == "http" 
|| protocol == "https" { + return "tcp" + } + return protocol +} diff --git a/internal/runtime/kubernetes/backend.go b/internal/runtime/kubernetes/backend.go new file mode 100644 index 00000000..86632b08 --- /dev/null +++ b/internal/runtime/kubernetes/backend.go @@ -0,0 +1,669 @@ +package kubernetes + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "strings" + "time" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + k8sclient "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/ports" + coreservices "github.com/highcard-dev/daemon/internal/core/services" + "github.com/highcard-dev/daemon/internal/utils/logger" + "go.uber.org/zap" +) + +type Backend struct { + client k8sclient.Interface + consoleManager ports.ConsoleManagerInterface + config Config + hubble HubbleClient +} + +func New(config Config, consoleManager ports.ConsoleManagerInterface) (*Backend, error) { + config = config.WithDefaults() + + restConfig, namespace, source, inCluster, err := runtimeRESTConfig(config) + if err != nil { + return nil, err + } + config.Namespace = namespace + if err := config.ValidateForBackend(); err != nil { + return nil, err + } + + client, err := k8sclient.NewForConfig(restConfig) + if err != nil { + return nil, err + } + if _, err := client.Discovery().ServerVersion(); err != nil { + return nil, fmt.Errorf("kubernetes API unavailable: %w", err) + } + logger.Log().Info("Using Kubernetes runtime config", zap.String("source", source), zap.String("namespace", config.Namespace)) + backend := &Backend{ + client: client, + consoleManager: consoleManager, + config: config, + hubble: NewHubbleRelayClient(config.HubbleRelayAddr), + } + if 
config.PullImage == "" { + logger.Log().Warn("Kubernetes cluster materialization requires --k8s-pull-image or DRUID_K8S_PULL_IMAGE") + } + if !inCluster && config.HubbleRelayAddr == defaultHubbleRelayAddr { + logger.Log().Warn("Default Hubble Relay address may not be reachable outside the cluster; set --hubble-relay-addr or port-forward Hubble Relay", zap.String("addr", config.HubbleRelayAddr)) + } + if err := backend.checkHubble(context.Background()); err != nil { + logger.Log().Warn("Hubble Relay unavailable; Kubernetes port traffic will degrade to Service/Endpoint status", zap.Error(err), zap.String("addr", config.HubbleRelayAddr)) + } + return backend, nil +} + +func runtimeRESTConfig(config Config) (*rest.Config, string, string, bool, error) { + restConfig, inClusterErr := rest.InClusterConfig() + if inClusterErr == nil { + namespace := config.Namespace + if namespace == "" { + namespace = namespaceFromServiceAccount() + } + if namespace == "" { + namespace = "default" + } + return restConfig, namespace, "in-cluster", true, nil + } + + restConfig, namespace, source, kubeconfigErr := kubeconfigRESTConfig(config) + if kubeconfigErr != nil { + return nil, "", "", false, fmt.Errorf("kubernetes runtime could not load auth: in-cluster config unavailable (%v); kubeconfig unavailable (%w)", inClusterErr, kubeconfigErr) + } + return restConfig, namespace, source, false, nil +} + +func kubeconfigRESTConfig(config Config) (*rest.Config, string, string, error) { + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + source := "kubeconfig" + if config.Kubeconfig != "" { + loadingRules.ExplicitPath = config.Kubeconfig + source = config.Kubeconfig + } + overrides := &clientcmd.ConfigOverrides{} + if config.Namespace != "" { + overrides.Context.Namespace = config.Namespace + } + clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides) + restConfig, err := clientConfig.ClientConfig() + if err != nil { + return nil, "", source, 
err + } + namespace, _, err := clientConfig.Namespace() + if err != nil || namespace == "" { + namespace = "default" + } + return restConfig, namespace, source, nil +} + +func NewWithClient(config Config, consoleManager ports.ConsoleManagerInterface, client k8sclient.Interface, hubble HubbleClient) *Backend { + config = config.WithDefaults() + if config.Namespace == "" { + config.Namespace = "default" + } + if hubble == nil { + hubble = NewHubbleRelayClient(config.HubbleRelayAddr) + } + return &Backend{client: client, consoleManager: consoleManager, config: config, hubble: hubble} +} + +func (b *Backend) Name() string { + return "kubernetes" +} + +func (b *Backend) MaterializeScroll(ctx context.Context, artifact string, requestedName string) (*ports.RuntimeMaterialization, error) { + if err := b.config.ValidateForMaterialization(); err != nil { + return nil, err + } + stagePVC := stagingPVCName(artifact + requestedName) + if err := b.ensurePVC(ctx, stagePVC); err != nil { + return nil, err + } + pullJob := pullJobSpec(b.config.Namespace, jobName("pull", ref(b.config.Namespace, stagePVC), shortHash(artifact)), stagePVC, b.config.PullImage, artifact, b.config.RegistrySecret, b.config.RegistryPlainHTTP) + if err := b.runHelperJob(ctx, pullJob); err != nil { + return nil, err + } + scrollYAML, err := b.ReadScrollFile(ref(b.config.Namespace, stagePVC)) + if err != nil { + return nil, err + } + scroll, err := domain.NewScrollFromBytes("", scrollYAML) + if err != nil { + return nil, err + } + id, err := coreservices.RuntimeScrollID(requestedName, scroll.Name) + if err != nil { + return nil, err + } + finalPVC := dataPVCName(id) + if err := b.ensurePVC(ctx, finalPVC); err != nil { + return nil, err + } + copyJob := copyPVCJobSpec(b.config.Namespace, jobName("copy", ref(b.config.Namespace, finalPVC), shortHash(stagePVC)), stagePVC, finalPVC, b.config.HelperImage) + if err := b.runHelperJob(ctx, copyJob); err != nil { + return nil, err + } + _ = 
b.client.CoreV1().PersistentVolumeClaims(b.config.Namespace).Delete(ctx, stagePVC, metav1.DeleteOptions{}) + return &ports.RuntimeMaterialization{ + Artifact: artifact, + ScrollRoot: ref(b.config.Namespace, finalPVC), + DataRoot: ref(b.config.Namespace, finalPVC), + ScrollYAML: scrollYAML, + }, nil +} + +func (b *Backend) ReadScrollFile(scrollRoot string) ([]byte, error) { + namespace, pvc, err := parseRef(scrollRoot) + if err != nil { + return nil, err + } + job := readScrollJobSpec(namespace, jobName("read", scrollRoot, "scroll-yaml"), pvc, b.config.HelperImage) + return b.runJobAndLogs(context.Background(), job) +} + +func (b *Backend) RunCommand(command ports.RuntimeCommand) (*int, error) { + for idx, procedure := range command.Command.Procedures { + procedureName := commandProcedureName(command.Name, idx, procedure) + if command.Command.Run == domain.RunModePersistent { + if procedure.IsSignal() { + if err := b.Signal(procedureName, procedure.Target, procedure.Signal, command.DataRoot); err != nil { + return nil, err + } + continue + } + if procedure.Image == "" { + return nil, fmt.Errorf("kubernetes procedure %s requires image", procedureName) + } + if err := b.ensurePersistentProcedure(context.Background(), command.DataRoot, procedureName, procedure, command.GlobalPorts); err != nil { + return nil, err + } + continue + } + exitCode, err := b.runJobProcedure(procedureName, procedure, command.DataRoot, command.GlobalPorts) + if err != nil { + return exitCode, err + } + if exitCode != nil && *exitCode != 0 { + if procedure.IgnoreFailure { + continue + } + return exitCode, nil + } + } + return nil, nil +} + +func (b *Backend) runJobProcedure(procedureName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port) (*int, error) { + if procedure.IsSignal() { + return nil, b.Signal(procedureName, procedure.Target, procedure.Signal, dataRoot) + } + if procedure.Image == "" { + return nil, fmt.Errorf("kubernetes procedure %s requires image", 
procedureName) + } + ctx := context.Background() + if err := b.ensureExpectedServices(ctx, dataRoot, procedureName, procedure, globalPorts); err != nil { + return nil, err + } + job, err := procedureJobSpec(b.config.Namespace, dataRoot, procedureName, procedure, b.config.RegistrySecret) + if err != nil { + return nil, err + } + _ = b.client.BatchV1().Jobs(b.config.Namespace).Delete(ctx, job.Name, metav1.DeleteOptions{}) + if _, err := b.client.BatchV1().Jobs(b.config.Namespace).Create(ctx, job, metav1.CreateOptions{}); err != nil { + return nil, err + } + output := make(chan string, 100) + console, doneChan := b.consoleManager.AddConsoleWithChannel(procedureName, domain.ConsoleTypeContainer, "stdin", output) + console.WriteInput = func(data string) error { + return b.Attach(procedureName, data) + } + streamStarted := false + podName, err := b.waitForJobPod(ctx, job.Name) + if err == nil { + streamStarted = true + go b.streamPodLogs(ctx, podName, output) + } + exitCode, err := b.waitForJob(ctx, job.Name) + if exitCode != nil { + console.MarkExited(*exitCode) + } + if !streamStarted { + close(output) + } + <-doneChan + if err != nil { + return exitCode, err + } + return exitCode, nil +} + +func (b *Backend) ensurePersistentProcedure(ctx context.Context, dataRoot string, procedureName string, procedure *domain.Procedure, globalPorts []domain.Port) error { + if err := b.ensureExpectedServices(ctx, dataRoot, procedureName, procedure, globalPorts); err != nil { + return err + } + statefulSet, err := procedureStatefulSetSpec(b.config.Namespace, dataRoot, procedureName, procedure, b.config.RegistrySecret) + if err != nil { + return err + } + existing, err := b.client.AppsV1().StatefulSets(b.config.Namespace).Get(ctx, statefulSet.Name, metav1.GetOptions{}) + switch { + case apierrors.IsNotFound(err): + if _, err := b.client.AppsV1().StatefulSets(b.config.Namespace).Create(ctx, statefulSet, metav1.CreateOptions{}); err != nil { + return err + } + case err != nil: + return 
err + default: + statefulSet.ResourceVersion = existing.ResourceVersion + if _, err := b.client.AppsV1().StatefulSets(b.config.Namespace).Update(ctx, statefulSet, metav1.UpdateOptions{}); err != nil { + return err + } + } + output := make(chan string, 100) + console, _ := b.consoleManager.AddConsoleWithChannel(procedureName, domain.ConsoleTypeContainer, "stdin", output) + console.WriteInput = func(data string) error { + return b.Attach(procedureName, data) + } + if err := b.waitForStatefulSet(ctx, statefulSet.Name); err != nil { + close(output) + return err + } + go func() { + podName, err := b.waitForPodBySelector(context.Background(), labels.SelectorFromSet(labels.Set{ + labelScrollID: statefulSet.Labels[labelScrollID], + labelProcedure: statefulSet.Labels[labelProcedure], + }).String()) + if err != nil { + output <- fmt.Sprintf("failed to find StatefulSet pod logs: %v", err) + close(output) + return + } + b.streamPodLogs(context.Background(), podName, output) + }() + return nil +} + +func (b *Backend) ExpectedPorts(dataRoot string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimePortStatus, error) { + _, pvc, err := parseRef(dataRoot) + if err != nil { + return nil, err + } + portsByName := portsByName(globalPorts) + statuses := []domain.RuntimePortStatus{} + hubbleAvailable := true + if err := b.checkHubble(context.Background()); err != nil { + hubbleAvailable = false + logger.Log().Warn("Hubble Relay unavailable; Kubernetes port traffic unavailable", zap.Error(err)) + } + for commandName, command := range commands { + if command == nil { + continue + } + for idx, procedure := range command.Procedures { + if procedure == nil || len(procedure.ExpectedPorts) == 0 { + continue + } + procedureName := fmt.Sprintf("%s.%d", commandName, idx) + if procedure.Id != nil { + procedureName = *procedure.Id + } + for _, expectedPort := range procedure.ExpectedPorts { + port, ok := portsByName[expectedPort.Name] + if !ok { + 
return nil, fmt.Errorf("expected port %s is not defined in top-level ports", expectedPort.Name) + } + status := domain.RuntimePortStatus{ + Name: expectedPort.Name, + Procedure: procedureName, + Port: port.Port, + Protocol: normalizeProtocol(port.Protocol), + KeepAliveTraffic: expectedPort.KeepAliveTraffic, + Source: "kubernetes-service", + } + serviceReady, hostPort := b.serviceReady(context.Background(), serviceName(dataRoot, procedureName, expectedPort.Name)) + status.Bound = serviceReady + status.HostPort = hostPort + if !hubbleAvailable { + status.Source = "hubble-relay-unavailable" + statuses = append(statuses, status) + continue + } + window := 5 * time.Minute + if expectedPort.KeepAliveTraffic != "" { + threshold, err := domain.ParseKeepAliveTraffic(expectedPort.KeepAliveTraffic) + if err != nil { + return nil, err + } + window = threshold.Window + status.TrafficWindow = threshold.Window.String() + } + traffic, err := b.hubble.HasFlow(context.Background(), TrafficQuery{ + Namespace: b.config.Namespace, + ScrollID: pvc, + ProcedureName: procedureName, + Port: port, + ExpectedPort: expectedPort, + Window: window, + }) + if err != nil { + logger.Log().Warn("Hubble Relay query failed", zap.Error(err)) + status.Source = "hubble-relay-unavailable" + statuses = append(statuses, status) + continue + } + status.Source = "hubble-relay" + status.Traffic = traffic + if expectedPort.KeepAliveTraffic != "" { + trafficOK := traffic + status.TrafficOK = &trafficOK + } + statuses = append(statuses, status) + } + } + } + return statuses, nil +} + +func (b *Backend) Attach(commandName string, data string) error { + return fmt.Errorf("kubernetes attach is not implemented for console %s: pod attach/exec support is required", commandName) +} + +func (b *Backend) Signal(_ string, target string, signal string, dataRoot string) error { + if target == "" { + return nil + } + switch signal { + case "", "SIGTERM", "TERM": + propagation := metav1.DeletePropagationBackground + return 
b.deleteRuntimeWorkload(context.Background(), dataRoot, target, metav1.DeleteOptions{PropagationPolicy: &propagation}) + case "SIGKILL", "KILL": + grace := int64(0) + propagation := metav1.DeletePropagationBackground + return b.deleteRuntimeWorkload(context.Background(), dataRoot, target, metav1.DeleteOptions{GracePeriodSeconds: &grace, PropagationPolicy: &propagation}) + default: + return fmt.Errorf("kubernetes signal %s is unsupported without pod exec", signal) + } +} + +func (b *Backend) deleteRuntimeWorkload(ctx context.Context, dataRoot string, target string, options metav1.DeleteOptions) error { + jobErr := b.client.BatchV1().Jobs(b.config.Namespace).Delete(ctx, jobName("proc", dataRoot, target), options) + if apierrors.IsNotFound(jobErr) { + jobErr = nil + } + statefulSetErr := b.client.AppsV1().StatefulSets(b.config.Namespace).Delete(ctx, statefulSetName(dataRoot, target), options) + if apierrors.IsNotFound(statefulSetErr) { + statefulSetErr = nil + } + podErr := b.deleteRuntimePods(ctx, dataRoot, target, options) + if jobErr != nil { + return jobErr + } + if statefulSetErr != nil { + return statefulSetErr + } + return podErr +} + +func (b *Backend) deleteRuntimePods(ctx context.Context, dataRoot string, target string, options metav1.DeleteOptions) error { + _, pvc, err := parseRef(dataRoot) + if err != nil { + return err + } + selector := labels.SelectorFromSet(labels.Set{ + labelScrollID: dnsLabel(pvc), + labelProcedure: dnsLabel(target), + }).String() + pods, err := b.client.CoreV1().Pods(b.config.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) + if err != nil { + return err + } + for _, pod := range pods.Items { + if err := b.client.CoreV1().Pods(b.config.Namespace).Delete(ctx, pod.Name, options); err != nil && !apierrors.IsNotFound(err) { + return err + } + } + return nil +} + +func (b *Backend) ensurePVC(ctx context.Context, name string) error { + pvc := pvcSpec(b.config.Namespace, name, b.config.StorageClass) + _, err := 
b.client.CoreV1().PersistentVolumeClaims(b.config.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + return nil + } + return err +} + +func (b *Backend) runHelperJob(ctx context.Context, job *batchv1.Job) error { + _, err := b.runJobAndLogs(ctx, job) + return err +} + +func (b *Backend) runJobAndLogs(ctx context.Context, job *batchv1.Job) ([]byte, error) { + _ = b.client.BatchV1().Jobs(job.Namespace).Delete(ctx, job.Name, metav1.DeleteOptions{}) + if _, err := b.client.BatchV1().Jobs(job.Namespace).Create(ctx, job, metav1.CreateOptions{}); err != nil { + return nil, err + } + podName, err := b.waitForJobPod(ctx, job.Name) + if err != nil { + return nil, err + } + exitCode, waitErr := b.waitForJob(ctx, job.Name) + logs, logErr := b.podLogs(ctx, podName) + if logErr != nil && waitErr == nil { + waitErr = logErr + } + if waitErr != nil { + return logs, waitErr + } + if exitCode != nil && *exitCode != 0 { + return logs, fmt.Errorf("job %s exited with code %d", job.Name, *exitCode) + } + return logs, nil +} + +func (b *Backend) waitForJobPod(ctx context.Context, jobName string) (string, error) { + selector := labels.SelectorFromSet(labels.Set{"job-name": jobName}).String() + return b.waitForPodBySelector(ctx, selector) +} + +func (b *Backend) waitForPodBySelector(ctx context.Context, selector string) (string, error) { + deadline := time.Now().Add(2 * time.Minute) + backoff := newCappedBackoff(podPollInitial, podPollMax) + for { + pods, err := b.client.CoreV1().Pods(b.config.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) + if err != nil { + return "", err + } + if len(pods.Items) > 0 { + return pods.Items[0].Name, nil + } + if time.Now().After(deadline) { + return "", fmt.Errorf("timed out waiting for pod matching selector %s", selector) + } + if err := sleepUntilNextPoll(ctx, deadline, backoff.Next()); err != nil { + return "", err + } + } +} + +func (b *Backend) waitForStatefulSet(ctx context.Context, name 
string) error { + deadline := time.Now().Add(5 * time.Minute) + backoff := newCappedBackoff(statefulSetPollInitial, statefulSetPollMax) + for { + statefulSet, err := b.client.AppsV1().StatefulSets(b.config.Namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return err + } + wanted := int32(1) + if statefulSet.Spec.Replicas != nil { + wanted = *statefulSet.Spec.Replicas + } + if statefulSet.Status.ReadyReplicas >= wanted { + return nil + } + if time.Now().After(deadline) { + return fmt.Errorf("timed out waiting for StatefulSet %s to become ready", name) + } + if err := sleepUntilNextPoll(ctx, deadline, backoff.Next()); err != nil { + return err + } + } +} + +func (b *Backend) waitForJob(ctx context.Context, jobName string) (*int, error) { + startedAt := time.Now() + deadline := time.Now().Add(24 * time.Hour) + for { + job, err := b.client.BatchV1().Jobs(b.config.Namespace).Get(ctx, jobName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + if job.Status.Succeeded > 0 { + exitCode := 0 + return &exitCode, nil + } + if job.Status.Failed > 0 { + exitCode := b.lastExitCode(ctx, jobName) + return &exitCode, fmt.Errorf("job %s failed", jobName) + } + if time.Now().After(deadline) { + return nil, fmt.Errorf("timed out waiting for job %s", jobName) + } + if err := sleepUntilNextPoll(ctx, deadline, jobPollInterval(time.Since(startedAt))); err != nil { + return nil, err + } + } +} + +func (b *Backend) lastExitCode(ctx context.Context, jobName string) int { + selector := labels.SelectorFromSet(labels.Set{"job-name": jobName}).String() + pods, err := b.client.CoreV1().Pods(b.config.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) + if err != nil || len(pods.Items) == 0 { + return 1 + } + for _, status := range pods.Items[0].Status.ContainerStatuses { + if status.State.Terminated != nil { + return int(status.State.Terminated.ExitCode) + } + } + return 1 +} + +func (b *Backend) podLogs(ctx context.Context, podName string) ([]byte, 
error) { + req := b.client.CoreV1().Pods(b.config.Namespace).GetLogs(podName, &corev1.PodLogOptions{}) + stream, err := req.Stream(ctx) + if err != nil { + return nil, err + } + defer stream.Close() + return io.ReadAll(stream) +} + +func (b *Backend) streamPodLogs(ctx context.Context, podName string, output chan<- string) { + defer close(output) + req := b.client.CoreV1().Pods(b.config.Namespace).GetLogs(podName, &corev1.PodLogOptions{Follow: true}) + stream, err := req.Stream(ctx) + if err != nil { + output <- fmt.Sprintf("failed to stream pod logs: %v", err) + return + } + defer stream.Close() + scanner := bufio.NewScanner(stream) + for scanner.Scan() { + output <- scanner.Text() + } +} + +func (b *Backend) ensureExpectedServices(ctx context.Context, dataRoot string, procedureName string, procedure *domain.Procedure, globalPorts []domain.Port) error { + ports := portsByName(globalPorts) + for _, expected := range procedure.ExpectedPorts { + port, ok := ports[expected.Name] + if !ok { + return fmt.Errorf("expected port %s is not defined in top-level ports", expected.Name) + } + service, err := serviceSpec(b.config.Namespace, dataRoot, procedureName, expected.Name, port) + if err != nil { + return err + } + if _, err := b.client.CoreV1().Services(b.config.Namespace).Create(ctx, service, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { + return err + } + } + return nil +} + +func (b *Backend) serviceReady(ctx context.Context, name string) (bool, int) { + service, err := b.client.CoreV1().Services(b.config.Namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, 0 + } + hostPort := 0 + if len(service.Spec.Ports) > 0 { + hostPort = int(service.Spec.Ports[0].Port) + } + selector := labels.SelectorFromSet(labels.Set{"kubernetes.io/service-name": name}).String() + slices, err := b.client.DiscoveryV1().EndpointSlices(b.config.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) + if err != nil { + return false, 
hostPort + } + return endpointSlicesReady(slices.Items), hostPort +} + +func endpointSlicesReady(slices []discoveryv1.EndpointSlice) bool { + for _, slice := range slices { + for _, endpoint := range slice.Endpoints { + if endpoint.Conditions.Ready == nil || *endpoint.Conditions.Ready { + return true + } + } + } + return false +} + +func (b *Backend) checkHubble(ctx context.Context) error { + if b.hubble == nil { + return errors.New("hubble client is not configured") + } + ctx, cancel := context.WithTimeout(ctx, 2*time.Second) + defer cancel() + _, err := b.hubble.HasFlow(ctx, TrafficQuery{Namespace: b.config.Namespace, Port: domain.Port{Port: 1, Protocol: "tcp"}}) + if err != nil && !strings.Contains(err.Error(), "context deadline") { + return err + } + return nil +} + +func portsByName(ports []domain.Port) map[string]domain.Port { + result := map[string]domain.Port{} + for _, port := range ports { + result[port.Name] = port + } + return result +} + +func commandProcedureName(commandName string, idx int, procedure *domain.Procedure) string { + procedureName := fmt.Sprintf("%s.%d", commandName, idx) + if procedure != nil && procedure.Id != nil { + procedureName = *procedure.Id + } + return procedureName +} diff --git a/internal/runtime/kubernetes/config.go b/internal/runtime/kubernetes/config.go new file mode 100644 index 00000000..8b1cc9b0 --- /dev/null +++ b/internal/runtime/kubernetes/config.go @@ -0,0 +1,84 @@ +package kubernetes + +import ( + "fmt" + "os" + "strings" +) + +const ( + defaultHubbleRelayAddr = "hubble-relay.kube-system.svc.cluster.local:80" + defaultHelperImage = "busybox:1.36" +) + +type Config struct { + Namespace string + StorageClass string + PullImage string + RegistrySecret string + RegistryPlainHTTP bool + HubbleRelayAddr string + HelperImage string + Kubeconfig string +} + +func (c Config) WithDefaults() Config { + if c.Namespace == "" { + c.Namespace = os.Getenv("DRUID_K8S_NAMESPACE") + } + if c.StorageClass == "" { + c.StorageClass = 
os.Getenv("DRUID_K8S_STORAGE_CLASS") + } + if c.PullImage == "" { + c.PullImage = os.Getenv("DRUID_K8S_PULL_IMAGE") + } + if c.RegistrySecret == "" { + c.RegistrySecret = os.Getenv("DRUID_K8S_REGISTRY_SECRET") + } + if !c.RegistryPlainHTTP { + c.RegistryPlainHTTP = plainHTTPEnv("DRUID_REGISTRY_PLAIN_HTTP") + } + if c.Kubeconfig == "" { + c.Kubeconfig = os.Getenv("DRUID_K8S_KUBECONFIG") + } + if c.HubbleRelayAddr == "" { + c.HubbleRelayAddr = os.Getenv("DRUID_HUBBLE_RELAY_ADDR") + } + if c.HubbleRelayAddr == "" { + c.HubbleRelayAddr = defaultHubbleRelayAddr + } + if c.HelperImage == "" { + c.HelperImage = os.Getenv("DRUID_K8S_HELPER_IMAGE") + } + if c.HelperImage == "" { + c.HelperImage = defaultHelperImage + } + return c +} + +func plainHTTPEnv(name string) bool { + value := strings.ToLower(strings.TrimSpace(os.Getenv(name))) + return value == "1" || value == "true" || value == "yes" +} + +func (c Config) ValidateForBackend() error { + if c.Namespace == "" { + return fmt.Errorf("kubernetes namespace is required") + } + return nil +} + +func (c Config) ValidateForMaterialization() error { + if c.PullImage == "" { + return fmt.Errorf("kubernetes pull image is required for cluster materialization; set --k8s-pull-image or DRUID_K8S_PULL_IMAGE") + } + return nil +} + +func namespaceFromServiceAccount() string { + data, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") + if err != nil { + return "" + } + return strings.TrimSpace(string(data)) +} diff --git a/internal/runtime/kubernetes/config_test.go b/internal/runtime/kubernetes/config_test.go new file mode 100644 index 00000000..624994dd --- /dev/null +++ b/internal/runtime/kubernetes/config_test.go @@ -0,0 +1,124 @@ +package kubernetes + +import ( + "os" + "path/filepath" + "strings" + "testing" +) + +func TestConfigWithDefaultsReadsKubeconfigEnv(t *testing.T) { + t.Setenv("DRUID_K8S_KUBECONFIG", "/tmp/druid-kubeconfig") + + config := Config{}.WithDefaults() + + if config.Kubeconfig != 
"/tmp/druid-kubeconfig" { + t.Fatalf("Kubeconfig = %s, want /tmp/druid-kubeconfig", config.Kubeconfig) + } +} + +func TestConfigWithDefaultsReadsRegistryPlainHTTPEnv(t *testing.T) { + t.Setenv("DRUID_REGISTRY_PLAIN_HTTP", "true") + + config := Config{}.WithDefaults() + + if !config.RegistryPlainHTTP { + t.Fatal("RegistryPlainHTTP = false, want true") + } +} + +func TestKubeconfigRESTConfigUsesCurrentContextNamespace(t *testing.T) { + kubeconfig := writeKubeconfig(t, "from-context") + + _, namespace, source, err := kubeconfigRESTConfig(Config{Kubeconfig: kubeconfig}) + if err != nil { + t.Fatal(err) + } + if namespace != "from-context" { + t.Fatalf("namespace = %s, want from-context", namespace) + } + if source != kubeconfig { + t.Fatalf("source = %s, want %s", source, kubeconfig) + } +} + +func TestKubeconfigRESTConfigExplicitNamespaceWins(t *testing.T) { + kubeconfig := writeKubeconfig(t, "from-context") + + _, namespace, _, err := kubeconfigRESTConfig(Config{Kubeconfig: kubeconfig, Namespace: "explicit"}) + if err != nil { + t.Fatal(err) + } + if namespace != "explicit" { + t.Fatalf("namespace = %s, want explicit", namespace) + } +} + +func TestKubeconfigRESTConfigDefaultsNamespace(t *testing.T) { + kubeconfig := writeKubeconfig(t, "") + + _, namespace, _, err := kubeconfigRESTConfig(Config{Kubeconfig: kubeconfig}) + if err != nil { + t.Fatal(err) + } + if namespace != "default" { + t.Fatalf("namespace = %s, want default", namespace) + } +} + +func TestKubeconfigRESTConfigUsesKUBECONFIGDefaultLoading(t *testing.T) { + kubeconfig := writeKubeconfig(t, "from-env") + t.Setenv("KUBECONFIG", kubeconfig) + + _, namespace, source, err := kubeconfigRESTConfig(Config{}) + if err != nil { + t.Fatal(err) + } + if namespace != "from-env" { + t.Fatalf("namespace = %s, want from-env", namespace) + } + if source != "kubeconfig" { + t.Fatalf("source = %s, want kubeconfig", source) + } +} + +func TestKubeconfigRESTConfigMissingExplicitPath(t *testing.T) { + _, _, _, err := 
kubeconfigRESTConfig(Config{Kubeconfig: filepath.Join(t.TempDir(), "missing")}) + if err == nil { + t.Fatal("error = nil, want missing kubeconfig error") + } + if !strings.Contains(err.Error(), "missing") { + t.Fatalf("error = %v, want missing path", err) + } +} + +func writeKubeconfig(t *testing.T, namespace string) string { + t.Helper() + namespaceLine := "" + if namespace != "" { + namespaceLine = " namespace: " + namespace + "\n" + } + kubeconfig := `apiVersion: v1 +kind: Config +clusters: +- name: test + cluster: + server: https://127.0.0.1:6443 + insecure-skip-tls-verify: true +contexts: +- name: test + context: + cluster: test + user: test +` + namespaceLine + `current-context: test +users: +- name: test + user: + token: test-token +` + path := filepath.Join(t.TempDir(), "config") + if err := os.WriteFile(path, []byte(kubeconfig), 0600); err != nil { + t.Fatal(err) + } + return path +} diff --git a/internal/runtime/kubernetes/hubble.go b/internal/runtime/kubernetes/hubble.go new file mode 100644 index 00000000..85bcb8a4 --- /dev/null +++ b/internal/runtime/kubernetes/hubble.go @@ -0,0 +1,100 @@ +package kubernetes + +import ( + "context" + "errors" + "fmt" + "io" + "time" + + hubbleflow "github.com/cilium/cilium/api/v1/flow" + hubbleobserver "github.com/cilium/cilium/api/v1/observer" + "github.com/highcard-dev/daemon/internal/core/domain" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/protobuf/types/known/timestamppb" +) + +type TrafficQuery struct { + Namespace string + ScrollID string + ProcedureName string + Port domain.Port + ExpectedPort domain.ExpectedPort + Window time.Duration +} + +type HubbleClient interface { + HasFlow(ctx context.Context, query TrafficQuery) (bool, error) +} + +type HubbleRelayClient struct { + addr string +} + +func NewHubbleRelayClient(addr string) *HubbleRelayClient { + return &HubbleRelayClient{addr: addr} +} + +func (c *HubbleRelayClient) HasFlow(ctx context.Context, query 
TrafficQuery) (bool, error) { + if c.addr == "" { + return false, fmt.Errorf("hubble relay address is required") + } + window := query.Window + if window <= 0 { + window = 5 * time.Minute + } + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + conn, err := grpc.NewClient(c.addr, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return false, err + } + defer conn.Close() + client := hubbleobserver.NewObserverClient(conn) + filter := &hubbleflow.FlowFilter{ + DestinationLabel: []string{ + labelManagedBy + "=druid", + labelScrollID + "=" + dnsLabel(query.ScrollID), + labelProcedure + "=" + dnsLabel(query.ProcedureName), + }, + DestinationPort: []string{fmt.Sprintf("%d", query.Port.Port)}, + } + if query.Namespace != "" { + filter.DestinationPod = []string{query.Namespace + "/"} + } + if protocol := normalizeProtocol(query.Port.Protocol); protocol != "" { + filter.Protocol = []string{protocol} + } + stream, err := client.GetFlows(ctx, &hubbleobserver.GetFlowsRequest{ + Since: timestamppb.New(time.Now().Add(-window)), + Whitelist: []*hubbleflow.FlowFilter{filter}, + }) + if err != nil { + return false, err + } + for { + _, err := stream.Recv() + if err == nil { + return true, nil + } + if ctx.Err() != nil { + return false, nil + } + if errors.Is(err, io.EOF) { + return false, nil + } + return false, err + } +} + +func normalizeProtocol(protocol string) string { + switch protocol { + case "", "tcp", "TCP": + return "tcp" + case "udp", "UDP": + return "udp" + default: + return protocol + } +} diff --git a/internal/runtime/kubernetes/names.go b/internal/runtime/kubernetes/names.go new file mode 100644 index 00000000..a6491d09 --- /dev/null +++ b/internal/runtime/kubernetes/names.go @@ -0,0 +1,99 @@ +package kubernetes + +import ( + "crypto/sha1" + "encoding/hex" + "fmt" + "path" + "regexp" + "strings" +) + +const ( + labelManagedBy = "app.kubernetes.io/managed-by" + labelComponent = "app.kubernetes.io/component" + 
labelScrollID = "druid.gg/scroll-id" + labelProcedure = "druid.gg/procedure" + labelPortName = "druid.gg/port-name" + labelCommand = "druid.gg/command" +) + +var dnsLabelRe = regexp.MustCompile(`[^a-z0-9-]+`) + +func dnsLabel(value string) string { + value = strings.ToLower(strings.TrimSpace(value)) + value = dnsLabelRe.ReplaceAllString(value, "-") + value = strings.Trim(value, "-") + if value == "" { + value = "scroll" + } + if len(value) <= 50 { + return value + } + hash := shortHash(value) + return strings.Trim(value[:40], "-") + "-" + hash +} + +func shortHash(value string) string { + sum := sha1.Sum([]byte(value)) + return hex.EncodeToString(sum[:])[:10] +} + +func dataPVCName(id string) string { + return dnsLabel("druid-" + id + "-data") +} + +func stagingPVCName(artifact string) string { + return dnsLabel("druid-stage-" + shortHash(artifact)) +} + +func jobName(prefix string, dataRoot string, procedureName string) string { + return dnsLabel(fmt.Sprintf("druid-%s-%s-%s", prefix, refPVCName(dataRoot), procedureName)) +} + +func statefulSetName(dataRoot string, procedureName string) string { + return dnsLabel(fmt.Sprintf("druid-sts-%s-%s", refPVCName(dataRoot), procedureName)) +} + +func serviceName(dataRoot string, procedureName string, portName string) string { + return dnsLabel(fmt.Sprintf("druid-%s-%s-%s", refPVCName(dataRoot), procedureName, portName)) +} + +func ref(namespace string, pvc string) string { + return fmt.Sprintf("k8s://%s/%s", namespace, pvc) +} + +func parseRef(value string) (string, string, error) { + if !strings.HasPrefix(value, "k8s://") { + return "", "", fmt.Errorf("kubernetes backend requires k8s://namespace/pvc refs, got %q", value) + } + trimmed := strings.TrimPrefix(value, "k8s://") + parts := strings.SplitN(trimmed, "/", 2) + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return "", "", fmt.Errorf("invalid kubernetes ref %q", value) + } + return parts[0], parts[1], nil +} + +func refPVCName(value string) string { + _, 
pvc, err := parseRef(value) + if err != nil { + return dnsLabel(value) + } + return pvc +} + +func mountSubPath(mountSubPath string) string { + if mountSubPath == "" { + return "data" + } + return path.Join("data", mountSubPath) +} + +func baseLabels(scrollID string) map[string]string { + return map[string]string{ + labelManagedBy: "druid", + labelComponent: "runtime", + labelScrollID: dnsLabel(scrollID), + } +} diff --git a/internal/runtime/kubernetes/resources.go b/internal/runtime/kubernetes/resources.go new file mode 100644 index 00000000..b93e0cf7 --- /dev/null +++ b/internal/runtime/kubernetes/resources.go @@ -0,0 +1,277 @@ +package kubernetes + +import ( + "path/filepath" + "sort" + + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/highcard-dev/daemon/internal/core/domain" +) + +func pvcSpec(namespace string, name string, storageClass string) *corev1.PersistentVolumeClaim { + quantity := resource.MustParse("1Gi") + spec := corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceStorage: quantity}, + }, + } + if storageClass != "" { + spec.StorageClassName = &storageClass + } + return &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: baseLabels(name), + }, + Spec: spec, + } +} + +func pullJobSpec(namespace string, jobName string, pvc string, image string, artifact string, registrySecret string, registryPlainHTTP bool) *batchv1.Job { + command := []string{"druid-client", "pull", artifact, "/scroll"} + job := helperJobSpec(namespace, jobName, pvc, image, command, registrySecret, map[string]string{ + labelComponent: "materializer", + }) + if registryPlainHTTP { + 
job.Spec.Template.Spec.Containers[0].Env = append(job.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{Name: "DRUID_REGISTRY_PLAIN_HTTP", Value: "true"}) + } + return job +} + +func readScrollJobSpec(namespace string, jobName string, pvc string, helperImage string) *batchv1.Job { + return helperJobSpec(namespace, jobName, pvc, helperImage, []string{"cat", "/scroll/scroll.yaml"}, "", map[string]string{ + labelComponent: "read-scroll", + }) +} + +func copyPVCJobSpec(namespace string, jobName string, sourcePVC string, targetPVC string, helperImage string) *batchv1.Job { + labels := map[string]string{ + labelManagedBy: "druid", + labelComponent: "copy-scroll", + } + backoff := int32(1) + return &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: jobName, Namespace: namespace, Labels: labels}, + Spec: batchv1.JobSpec{ + BackoffLimit: &backoff, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: labels}, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{{ + Name: "copy", + Image: helperImage, + Command: []string{"sh", "-c", "cp -a /stage/. 
/final/"}, + VolumeMounts: []corev1.VolumeMount{ + {Name: "stage", MountPath: "/stage"}, + {Name: "final", MountPath: "/final"}, + }, + }}, + Volumes: []corev1.Volume{ + pvcVolume("stage", sourcePVC), + pvcVolume("final", targetPVC), + }, + }, + }, + }, + } +} + +func helperJobSpec(namespace string, jobName string, pvc string, image string, command []string, registrySecret string, labels map[string]string) *batchv1.Job { + allLabels := map[string]string{ + labelManagedBy: "druid", + } + for key, value := range labels { + allLabels[key] = value + } + backoff := int32(1) + podSpec := corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{{ + Name: "main", + Image: image, + Command: command, + VolumeMounts: []corev1.VolumeMount{{Name: "scroll", MountPath: "/scroll"}}, + }}, + Volumes: []corev1.Volume{pvcVolume("scroll", pvc)}, + } + if registrySecret != "" { + podSpec.ImagePullSecrets = []corev1.LocalObjectReference{{Name: registrySecret}} + } + return &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: jobName, Namespace: namespace, Labels: allLabels}, + Spec: batchv1.JobSpec{ + BackoffLimit: &backoff, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: allLabels}, + Spec: podSpec, + }, + }, + } +} + +func procedureJobSpec(namespace string, dataRoot string, procedureName string, procedure *domain.Procedure, registrySecret string) (*batchv1.Job, error) { + _, pvc, err := parseRef(dataRoot) + if err != nil { + return nil, err + } + labels := baseLabels(pvc) + labels[labelProcedure] = dnsLabel(procedureName) + labels[labelCommand] = dnsLabel(procedureName) + backoff := int32(0) + container := corev1.Container{ + Name: "main", + Image: procedure.Image, + Command: procedure.Command, + WorkingDir: procedure.WorkingDir, + TTY: procedure.TTY, + Stdin: procedure.TTY, + ImagePullPolicy: corev1.PullIfNotPresent, + Env: envVars(procedure.Env), + VolumeMounts: volumeMounts(procedure.Mounts), + } + podSpec := 
corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{container}, + Volumes: []corev1.Volume{pvcVolume("data", pvc)}, + } + if registrySecret != "" { + podSpec.ImagePullSecrets = []corev1.LocalObjectReference{{Name: registrySecret}} + } + return &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: jobName("proc", dataRoot, procedureName), + Namespace: namespace, + Labels: labels, + }, + Spec: batchv1.JobSpec{ + BackoffLimit: &backoff, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: labels}, + Spec: podSpec, + }, + }, + }, nil +} + +func procedureStatefulSetSpec(namespace string, dataRoot string, procedureName string, procedure *domain.Procedure, registrySecret string) (*appsv1.StatefulSet, error) { + _, pvc, err := parseRef(dataRoot) + if err != nil { + return nil, err + } + labels := baseLabels(pvc) + labels[labelProcedure] = dnsLabel(procedureName) + labels[labelCommand] = dnsLabel(procedureName) + replicas := int32(1) + container := corev1.Container{ + Name: "main", + Image: procedure.Image, + Command: procedure.Command, + WorkingDir: procedure.WorkingDir, + TTY: procedure.TTY, + Stdin: procedure.TTY, + ImagePullPolicy: corev1.PullIfNotPresent, + Env: envVars(procedure.Env), + VolumeMounts: volumeMounts(procedure.Mounts), + } + podSpec := corev1.PodSpec{ + Containers: []corev1.Container{container}, + Volumes: []corev1.Volume{pvcVolume("data", pvc)}, + } + if registrySecret != "" { + podSpec.ImagePullSecrets = []corev1.LocalObjectReference{{Name: registrySecret}} + } + return &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: statefulSetName(dataRoot, procedureName), + Namespace: namespace, + Labels: labels, + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas, + ServiceName: statefulSetName(dataRoot, procedureName), + Selector: &metav1.LabelSelector{MatchLabels: labels}, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: labels}, + Spec: podSpec, + }, + 
}, + }, nil +} + +func serviceSpec(namespace string, dataRoot string, procedureName string, portName string, port domain.Port) (*corev1.Service, error) { + _, pvc, err := parseRef(dataRoot) + if err != nil { + return nil, err + } + labels := baseLabels(pvc) + labels[labelProcedure] = dnsLabel(procedureName) + labels[labelPortName] = dnsLabel(portName) + selector := baseLabels(pvc) + selector[labelProcedure] = dnsLabel(procedureName) + protocol := corev1.ProtocolTCP + if normalizeProtocol(port.Protocol) == "udp" { + protocol = corev1.ProtocolUDP + } + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName(dataRoot, procedureName, portName), + Namespace: namespace, + Labels: labels, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: selector, + Ports: []corev1.ServicePort{{ + Name: dnsLabel(portName), + Protocol: protocol, + Port: int32(port.Port), + TargetPort: intstr.FromInt(port.Port), + }}, + }, + }, nil +} + +func pvcVolume(name string, pvc string) corev1.Volume { + return corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: pvc}, + }, + } +} + +func volumeMounts(mounts []domain.Mount) []corev1.VolumeMount { + result := make([]corev1.VolumeMount, 0, len(mounts)) + for idx, mount := range mounts { + result = append(result, corev1.VolumeMount{ + Name: "data", + MountPath: mount.Path, + SubPath: filepath.ToSlash(mountSubPath(mount.SubPath)), + ReadOnly: mount.ReadOnly, + }) + _ = idx + } + return result +} + +func envVars(values map[string]string) []corev1.EnvVar { + keys := make([]string, 0, len(values)) + for key := range values { + keys = append(keys, key) + } + sort.Strings(keys) + result := make([]corev1.EnvVar, 0, len(keys)) + for _, key := range keys { + result = append(result, corev1.EnvVar{Name: key, Value: values[key]}) + } + return result +} diff --git a/internal/runtime/kubernetes/resources_test.go 
b/internal/runtime/kubernetes/resources_test.go new file mode 100644 index 00000000..81f3727a --- /dev/null +++ b/internal/runtime/kubernetes/resources_test.go @@ -0,0 +1,235 @@ +package kubernetes + +import ( + "context" + "errors" + "strings" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + + "github.com/highcard-dev/daemon/internal/core/domain" + coreservices "github.com/highcard-dev/daemon/internal/core/services" +) + +type fakeHubble struct { + hasFlow bool + err error +} + +func (f fakeHubble) HasFlow(context.Context, TrafficQuery) (bool, error) { + return f.hasFlow, f.err +} + +func TestProcedureJobSpecBuildsDeterministicMountsAndLabels(t *testing.T) { + procedure := &domain.Procedure{ + Image: "alpine:3.20", + Command: []string{"sh", "-c", "echo ok"}, + WorkingDir: "/work", + Env: map[string]string{ + "B": "two", + "A": "one", + }, + Mounts: []domain.Mount{{Path: "/work", SubPath: "cache"}}, + } + + job, err := procedureJobSpec("druid", ref("druid", "druid-static-web-data"), "start", procedure, "registry-secret") + if err != nil { + t.Fatal(err) + } + + if job.Namespace != "druid" { + t.Fatalf("namespace = %s, want druid", job.Namespace) + } + if job.Labels[labelManagedBy] != "druid" || job.Labels[labelProcedure] != "start" { + t.Fatalf("labels = %#v", job.Labels) + } + pod := job.Spec.Template.Spec + if len(pod.ImagePullSecrets) != 1 || pod.ImagePullSecrets[0].Name != "registry-secret" { + t.Fatalf("image pull secrets = %#v", pod.ImagePullSecrets) + } + container := pod.Containers[0] + if container.Image != "alpine:3.20" { + t.Fatalf("image = %s", container.Image) + } + if got := container.VolumeMounts[0].SubPath; got != "data/cache" { + t.Fatalf("subPath = %s, want data/cache", got) + } + if container.Env[0].Name != "A" || container.Env[1].Name != "B" { + 
t.Fatalf("env order = %#v", container.Env) + } +} + +func TestProcedureStatefulSetSpecBuildsPersistentWorkload(t *testing.T) { + procedure := &domain.Procedure{ + Image: "nginx:1.27", + Command: []string{"nginx", "-g", "daemon off;"}, + ExpectedPorts: []domain.ExpectedPort{{Name: "http"}}, + Mounts: []domain.Mount{{Path: "/usr/share/nginx/html", SubPath: "site", ReadOnly: true}}, + } + + statefulSet, err := procedureStatefulSetSpec("druid", ref("druid", "druid-static-web-data"), "start", procedure, "registry-secret") + if err != nil { + t.Fatal(err) + } + + if statefulSet.Namespace != "druid" { + t.Fatalf("namespace = %s, want druid", statefulSet.Namespace) + } + if statefulSet.Name != statefulSetName(ref("druid", "druid-static-web-data"), "start") { + t.Fatalf("name = %s", statefulSet.Name) + } + if statefulSet.Spec.Replicas == nil || *statefulSet.Spec.Replicas != 1 { + t.Fatalf("replicas = %#v, want 1", statefulSet.Spec.Replicas) + } + if statefulSet.Spec.Selector.MatchLabels[labelProcedure] != "start" { + t.Fatalf("selector = %#v", statefulSet.Spec.Selector.MatchLabels) + } + pod := statefulSet.Spec.Template.Spec + if len(pod.ImagePullSecrets) != 1 || pod.ImagePullSecrets[0].Name != "registry-secret" { + t.Fatalf("image pull secrets = %#v", pod.ImagePullSecrets) + } + container := pod.Containers[0] + if container.Image != "nginx:1.27" { + t.Fatalf("image = %s", container.Image) + } + if got := container.VolumeMounts[0].SubPath; got != "data/site" { + t.Fatalf("subPath = %s, want data/site", got) + } + if !container.VolumeMounts[0].ReadOnly { + t.Fatal("mount should be read-only") + } +} + +func TestPullJobSpecPropagatesPlainHTTPRegistryEnv(t *testing.T) { + job := pullJobSpec("druid", "pull", "scroll-pvc", "druid-client:test", "registry:5000/lab:1.0", "", true) + + env := job.Spec.Template.Spec.Containers[0].Env + if len(env) != 1 || env[0].Name != "DRUID_REGISTRY_PLAIN_HTTP" || env[0].Value != "true" { + t.Fatalf("env = %#v", env) + } +} + +func 
TestExpectedPortsUsesHubbleFlowPresence(t *testing.T) { + client := fake.NewSimpleClientset() + backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), client, fakeHubble{hasFlow: true}) + dataRoot := ref("druid", "druid-static-web-data") + procedureName := "start" + service, err := serviceSpec("druid", dataRoot, procedureName, "http", domain.Port{Name: "http", Port: 80, Protocol: "tcp"}) + if err != nil { + t.Fatal(err) + } + if _, err := client.CoreV1().Services("druid").Create(context.Background(), service, metav1.CreateOptions{}); err != nil { + t.Fatal(err) + } + ready := true + if _, err := client.DiscoveryV1().EndpointSlices("druid").Create(context.Background(), &discoveryv1.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "http", + Namespace: "druid", + Labels: map[string]string{"kubernetes.io/service-name": service.Name}, + }, + Endpoints: []discoveryv1.Endpoint{{Conditions: discoveryv1.EndpointConditions{Ready: &ready}}}, + Ports: []discoveryv1.EndpointPort{{Name: &service.Spec.Ports[0].Name}}, + }, metav1.CreateOptions{}); err != nil { + t.Fatal(err) + } + + statuses, err := backend.ExpectedPorts(dataRoot, map[string]*domain.CommandInstructionSet{ + "start": {Procedures: []*domain.Procedure{{ + Id: &procedureName, + ExpectedPorts: []domain.ExpectedPort{{Name: "http", KeepAliveTraffic: "1b/5m"}}, + }}}, + }, []domain.Port{{Name: "http", Port: 80, Protocol: "tcp"}}) + if err != nil { + t.Fatal(err) + } + if len(statuses) != 1 { + t.Fatalf("statuses = %#v", statuses) + } + status := statuses[0] + if !status.Bound || !status.Traffic || status.TrafficOK == nil || !*status.TrafficOK { + t.Fatalf("status = %#v", status) + } + if status.Source != "hubble-relay" { + t.Fatalf("source = %s, want hubble-relay", status.Source) + } + if status.RXBytes != nil || status.TXBytes != nil || status.TrafficBytes != nil { + t.Fatalf("byte counters should be nil for Kubernetes Hubble status: %#v", status) + } +} 
+ +func TestExpectedPortsDegradesWhenHubbleUnavailable(t *testing.T) { + client := fake.NewSimpleClientset() + backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), client, fakeHubble{err: errors.New("relay unavailable")}) + dataRoot := ref("druid", "druid-static-web-data") + service, err := serviceSpec("druid", dataRoot, "start", "http", domain.Port{Name: "http", Port: 80, Protocol: "tcp"}) + if err != nil { + t.Fatal(err) + } + if _, err := client.CoreV1().Services("druid").Create(context.Background(), service, metav1.CreateOptions{}); err != nil { + t.Fatal(err) + } + + statuses, err := backend.ExpectedPorts(dataRoot, map[string]*domain.CommandInstructionSet{ + "start": {Procedures: []*domain.Procedure{{ExpectedPorts: []domain.ExpectedPort{{Name: "http", KeepAliveTraffic: "1b/5m"}}}}}, + }, []domain.Port{{Name: "http", Port: 80, Protocol: "tcp"}}) + if err != nil { + t.Fatal(err) + } + if len(statuses) != 1 { + t.Fatalf("statuses = %#v", statuses) + } + status := statuses[0] + if status.Source != "hubble-relay-unavailable" { + t.Fatalf("source = %s, want hubble-relay-unavailable", status.Source) + } + if status.Traffic || status.TrafficOK != nil { + t.Fatalf("traffic should be unavailable: %#v", status) + } +} + +func TestMaterializationRequiresPullImage(t *testing.T) { + backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), fake.NewSimpleClientset(), fakeHubble{}) + _, err := backend.MaterializeScroll(context.Background(), "ghcr.io/example/scroll:latest", "") + if err == nil { + t.Fatal("MaterializeScroll error = nil, want missing pull image error") + } + if !strings.Contains(err.Error(), "pull image is required") { + t.Fatalf("error = %v, want pull image required", err) + } +} + +func TestSignalDeletesPersistentStatefulSetAndPods(t *testing.T) { + client := fake.NewSimpleClientset() + backend := NewWithClient(Config{Namespace: "druid"}, 
coreservices.NewConsoleManager(coreservices.NewLogManager()), client, fakeHubble{}) + dataRoot := ref("druid", "druid-static-web-data") + name := statefulSetName(dataRoot, "start") + labels := baseLabels("druid-static-web-data") + labels[labelProcedure] = "start" + if _, err := client.AppsV1().StatefulSets("druid").Create(context.Background(), &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "druid", Labels: labels}, + }, metav1.CreateOptions{}); err != nil { + t.Fatal(err) + } + if _, err := client.CoreV1().Pods("druid").Create(context.Background(), &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "start-0", Namespace: "druid", Labels: labels}, + }, metav1.CreateOptions{}); err != nil { + t.Fatal(err) + } + + if err := backend.Signal("", "start", "SIGKILL", dataRoot); err != nil { + t.Fatal(err) + } + if _, err := client.AppsV1().StatefulSets("druid").Get(context.Background(), name, metav1.GetOptions{}); !apierrors.IsNotFound(err) { + t.Fatalf("StatefulSet get error = %v, want not found", err) + } + if _, err := client.CoreV1().Pods("druid").Get(context.Background(), "start-0", metav1.GetOptions{}); !apierrors.IsNotFound(err) { + t.Fatalf("Pod get error = %v, want not found", err) + } +} diff --git a/internal/runtime/kubernetes/state_store.go b/internal/runtime/kubernetes/state_store.go new file mode 100644 index 00000000..2d2656ce --- /dev/null +++ b/internal/runtime/kubernetes/state_store.go @@ -0,0 +1,236 @@ +package kubernetes + +import ( + "context" + "encoding/json" + "fmt" + "sort" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + k8sclient "k8s.io/client-go/kubernetes" + + "github.com/highcard-dev/daemon/internal/core/domain" + coreservices "github.com/highcard-dev/daemon/internal/core/services" +) + +const ( + runtimeStateComponent = "runtime-state" + + configMapKeyID = "id" + configMapKeyOwnerID = 
"owner_id" + configMapKeyArtifact = "artifact" + configMapKeyScrollRoot = "scroll_root" + configMapKeyDataRoot = "data_root" + configMapKeyScrollName = "scroll_name" + configMapKeyScrollYAML = "scroll_yaml" + configMapKeyStatus = "status" + configMapKeyCreatedAt = "created_at" + configMapKeyUpdatedAt = "updated_at" + configMapKeyCommandsJSON = "commands_json" +) + +type ConfigMapStateStore struct { + client k8sclient.Interface + namespace string +} + +func NewConfigMapStateStore(config Config) (*ConfigMapStateStore, error) { + config = config.WithDefaults() + restConfig, namespace, _, _, err := runtimeRESTConfig(config) + if err != nil { + return nil, err + } + client, err := k8sclient.NewForConfig(restConfig) + if err != nil { + return nil, err + } + return NewConfigMapStateStoreWithClient(namespace, client), nil +} + +func NewConfigMapStateStoreWithClient(namespace string, client k8sclient.Interface) *ConfigMapStateStore { + if namespace == "" { + namespace = "default" + } + return &ConfigMapStateStore{client: client, namespace: namespace} +} + +func (s *ConfigMapStateStore) StateDir() string { + return fmt.Sprintf("kubernetes:%s/configmaps", s.namespace) +} + +func (s *ConfigMapStateStore) ScrollRoot(id string) string { + return ref(s.namespace, dataPVCName(id)) +} + +func (s *ConfigMapStateStore) DataRoot(id string) string { + return ref(s.namespace, dataPVCName(id)) +} + +func (s *ConfigMapStateStore) CreateScroll(scroll *domain.RuntimeScroll) error { + now := time.Now().UTC() + scroll.CreatedAt = now + scroll.UpdatedAt = now + if scroll.Status == "" { + scroll.Status = domain.RuntimeScrollStatusCreated + } + if scroll.Commands == nil { + scroll.Commands = map[string]domain.LockStatus{} + } + configMap, err := runtimeScrollConfigMap(s.namespace, scroll) + if err != nil { + return err + } + _, err = s.client.CoreV1().ConfigMaps(s.namespace).Create(context.Background(), configMap, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + return 
fmt.Errorf("%w: %s", coreservices.ErrScrollAlreadyExists, scroll.ID) + } + return err +} + +func (s *ConfigMapStateStore) ListScrolls() ([]*domain.RuntimeScroll, error) { + selector := labels.SelectorFromSet(labels.Set{ + labelManagedBy: "druid", + labelComponent: runtimeStateComponent, + }) + configMaps, err := s.client.CoreV1().ConfigMaps(s.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: selector.String()}) + if err != nil { + return nil, err + } + scrolls := make([]*domain.RuntimeScroll, 0, len(configMaps.Items)) + for i := range configMaps.Items { + scroll, err := runtimeScrollFromConfigMap(&configMaps.Items[i]) + if err != nil { + return nil, err + } + scrolls = append(scrolls, scroll) + } + sort.Slice(scrolls, func(i, j int) bool { + return scrolls[i].ID < scrolls[j].ID + }) + return scrolls, nil +} + +func (s *ConfigMapStateStore) GetScroll(id string) (*domain.RuntimeScroll, error) { + configMap, err := s.client.CoreV1().ConfigMaps(s.namespace).Get(context.Background(), scrollConfigMapName(id), metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + return nil, coreservices.ErrScrollNotFound + } + if err != nil { + return nil, err + } + return runtimeScrollFromConfigMap(configMap) +} + +func (s *ConfigMapStateStore) UpdateScroll(scroll *domain.RuntimeScroll) error { + current, err := s.client.CoreV1().ConfigMaps(s.namespace).Get(context.Background(), scrollConfigMapName(scroll.ID), metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + return coreservices.ErrScrollNotFound + } + if err != nil { + return err + } + scroll.UpdatedAt = time.Now().UTC() + next, err := runtimeScrollConfigMap(s.namespace, scroll) + if err != nil { + return err + } + next.ResourceVersion = current.ResourceVersion + _, err = s.client.CoreV1().ConfigMaps(s.namespace).Update(context.Background(), next, metav1.UpdateOptions{}) + if apierrors.IsNotFound(err) { + return coreservices.ErrScrollNotFound + } + return err +} + +func (s *ConfigMapStateStore) 
DeleteScroll(id string) error { + err := s.client.CoreV1().ConfigMaps(s.namespace).Delete(context.Background(), scrollConfigMapName(id), metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + return coreservices.ErrScrollNotFound + } + return err +} + +func runtimeScrollConfigMap(namespace string, scroll *domain.RuntimeScroll) (*corev1.ConfigMap, error) { + commands, err := json.Marshal(scroll.Commands) + if err != nil { + return nil, err + } + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: scrollConfigMapName(scroll.ID), + Namespace: namespace, + Labels: map[string]string{ + labelManagedBy: "druid", + labelComponent: runtimeStateComponent, + labelScrollID: dnsLabel(scroll.ID), + "scroll-name": dnsLabel(scroll.ScrollName), + }, + }, + Data: map[string]string{ + configMapKeyID: scroll.ID, + configMapKeyOwnerID: scroll.OwnerID, + configMapKeyArtifact: scroll.Artifact, + configMapKeyScrollRoot: scroll.ScrollRoot, + configMapKeyDataRoot: scroll.DataRoot, + configMapKeyScrollName: scroll.ScrollName, + configMapKeyScrollYAML: scroll.ScrollYAML, + configMapKeyStatus: string(scroll.Status), + configMapKeyCreatedAt: formatRuntimeTime(scroll.CreatedAt), + configMapKeyUpdatedAt: formatRuntimeTime(scroll.UpdatedAt), + configMapKeyCommandsJSON: string(commands), + }, + }, nil +} + +func runtimeScrollFromConfigMap(configMap *corev1.ConfigMap) (*domain.RuntimeScroll, error) { + data := configMap.Data + commandsJSON := data[configMapKeyCommandsJSON] + if commandsJSON == "" { + commandsJSON = "{}" + } + commands := map[string]domain.LockStatus{} + if err := json.Unmarshal([]byte(commandsJSON), &commands); err != nil { + return nil, err + } + id := data[configMapKeyID] + if id == "" { + id = configMap.Labels[labelScrollID] + } + scroll := &domain.RuntimeScroll{ + ID: id, + OwnerID: data[configMapKeyOwnerID], + Artifact: data[configMapKeyArtifact], + ScrollRoot: data[configMapKeyScrollRoot], + DataRoot: data[configMapKeyDataRoot], + ScrollName: 
data[configMapKeyScrollName], + ScrollYAML: data[configMapKeyScrollYAML], + Status: domain.RuntimeScrollStatus(data[configMapKeyStatus]), + CreatedAt: parseRuntimeTime(data[configMapKeyCreatedAt]), + UpdatedAt: parseRuntimeTime(data[configMapKeyUpdatedAt]), + Commands: commands, + } + if scroll.Status == "" { + scroll.Status = domain.RuntimeScrollStatusCreated + } + return scroll, nil +} + +func scrollConfigMapName(id string) string { + return dnsLabel("druid-scroll-" + id) +} + +func formatRuntimeTime(t time.Time) string { + return t.UTC().Format(time.RFC3339Nano) +} + +func parseRuntimeTime(value string) time.Time { + t, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return time.Time{} + } + return t +} diff --git a/internal/runtime/kubernetes/state_store_test.go b/internal/runtime/kubernetes/state_store_test.go new file mode 100644 index 00000000..a29b1829 --- /dev/null +++ b/internal/runtime/kubernetes/state_store_test.go @@ -0,0 +1,115 @@ +package kubernetes + +import ( + "errors" + "testing" + + "github.com/highcard-dev/daemon/internal/core/domain" + coreservices "github.com/highcard-dev/daemon/internal/core/services" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" +) + +func TestConfigMapStateStoreRoundTripsRuntimeScroll(t *testing.T) { + store := NewConfigMapStateStoreWithClient("druid", fake.NewSimpleClientset()) + exitCode := 7 + scroll := &domain.RuntimeScroll{ + ID: "container-lab", + Artifact: "registry.local/container-lab:1.0", + ScrollRoot: ref("druid", "druid-container-lab-data"), + DataRoot: ref("druid", "druid-container-lab-data"), + ScrollName: "container-lab", + ScrollYAML: "name: container-lab\n", + Status: domain.RuntimeScrollStatusCreated, + Commands: map[string]domain.LockStatus{ + "verify": {Status: domain.ScrollLockStatusError, ExitCode: &exitCode, LastStatusChange: 123}, + }, + } + + if err := store.CreateScroll(scroll); err != nil { + t.Fatal(err) + } + + got, err := 
store.GetScroll("container-lab") + if err != nil { + t.Fatal(err) + } + if got.Artifact != scroll.Artifact || got.ScrollRoot != scroll.ScrollRoot || got.ScrollYAML != scroll.ScrollYAML { + t.Fatalf("stored scroll mismatch: %#v", got) + } + if got.Commands["verify"].Status != domain.ScrollLockStatusError { + t.Fatalf("command status = %s, want error", got.Commands["verify"].Status) + } + if got.Commands["verify"].ExitCode == nil || *got.Commands["verify"].ExitCode != exitCode { + t.Fatalf("exit code = %#v, want %d", got.Commands["verify"].ExitCode, exitCode) + } + + got.Status = domain.RuntimeScrollStatusRunning + got.Commands["verify"] = domain.LockStatus{Status: domain.ScrollLockStatusDone, LastStatusChange: 456} + if err := store.UpdateScroll(got); err != nil { + t.Fatal(err) + } + + list, err := store.ListScrolls() + if err != nil { + t.Fatal(err) + } + if len(list) != 1 || list[0].Status != domain.RuntimeScrollStatusRunning || list[0].Commands["verify"].Status != domain.ScrollLockStatusDone { + t.Fatalf("list = %#v, want updated scroll", list) + } + + configMap, err := store.client.CoreV1().ConfigMaps("druid").Get(t.Context(), scrollConfigMapName("container-lab"), metav1.GetOptions{}) + if err != nil { + t.Fatal(err) + } + if configMap.Data[configMapKeyCommandsJSON] == "" { + t.Fatal("commands_json was not stored") + } + + if err := store.DeleteScroll("container-lab"); err != nil { + t.Fatal(err) + } + if _, err := store.GetScroll("container-lab"); !errors.Is(err, coreservices.ErrScrollNotFound) { + t.Fatalf("GetScroll after delete error = %v, want ErrScrollNotFound", err) + } +} + +func TestConfigMapStateStoreDuplicateCreateReturnsConflict(t *testing.T) { + store := NewConfigMapStateStoreWithClient("druid", fake.NewSimpleClientset()) + scroll := &domain.RuntimeScroll{ + ID: "duplicate", + Artifact: "local", + ScrollRoot: ref("druid", "druid-duplicate-data"), + DataRoot: ref("druid", "druid-duplicate-data"), + ScrollName: "duplicate", + ScrollYAML: "name: 
duplicate\n", + } + + if err := store.CreateScroll(scroll); err != nil { + t.Fatal(err) + } + if err := store.CreateScroll(scroll); !errors.Is(err, coreservices.ErrScrollAlreadyExists) { + t.Fatalf("CreateScroll duplicate error = %v, want ErrScrollAlreadyExists", err) + } +} + +func TestConfigMapStateStoreMissingScrollReturnsNotFound(t *testing.T) { + store := NewConfigMapStateStoreWithClient("druid", fake.NewSimpleClientset()) + if _, err := store.GetScroll("missing"); !errors.Is(err, coreservices.ErrScrollNotFound) { + t.Fatalf("GetScroll error = %v, want ErrScrollNotFound", err) + } + if err := store.DeleteScroll("missing"); !errors.Is(err, coreservices.ErrScrollNotFound) { + t.Fatalf("DeleteScroll error = %v, want ErrScrollNotFound", err) + } +} + +func TestConfigMapStateStoreDerivesKubernetesRoots(t *testing.T) { + store := NewConfigMapStateStoreWithClient("druid", fake.NewSimpleClientset()) + want := "k8s://druid/druid-container-lab-data" + if got := store.ScrollRoot("container-lab"); got != want { + t.Fatalf("ScrollRoot = %s, want %s", got, want) + } + if got := store.DataRoot("container-lab"); got != want { + t.Fatalf("DataRoot = %s, want %s", got, want) + } +} diff --git a/internal/runtime/kubernetes/wait.go b/internal/runtime/kubernetes/wait.go new file mode 100644 index 00000000..b42fc30d --- /dev/null +++ b/internal/runtime/kubernetes/wait.go @@ -0,0 +1,79 @@ +package kubernetes + +import ( + "context" + "time" +) + +const ( + podPollInitial = 500 * time.Millisecond + podPollMax = 3 * time.Second + statefulSetPollInitial = 1 * time.Second + statefulSetPollMax = 5 * time.Second + waitBackoffFactor = 1.25 +) + +type cappedBackoff struct { + current time.Duration + max time.Duration + factor float64 +} + +func newCappedBackoff(initial time.Duration, max time.Duration) *cappedBackoff { + return &cappedBackoff{ + current: initial, + max: max, + factor: waitBackoffFactor, + } +} + +func (b *cappedBackoff) Next() time.Duration { + delay := b.current + next := 
time.Duration(float64(b.current) * b.factor) + if next <= b.current { + next = b.current + time.Millisecond + } + if next > b.max { + next = b.max + } + b.current = next + return delay +} + +func jobPollInterval(elapsed time.Duration) time.Duration { + switch { + case elapsed < 30*time.Minute: + return 5 * time.Second + case elapsed < time.Hour: + return time.Minute + case elapsed < 2*time.Hour: + return 2 * time.Minute + default: + return 5 * time.Minute + } +} + +func sleepWithContext(ctx context.Context, delay time.Duration) error { + if delay <= 0 { + return nil + } + timer := time.NewTimer(delay) + defer timer.Stop() + select { + case <-ctx.Done(): + return ctx.Err() + case <-timer.C: + return nil + } +} + +func sleepUntilNextPoll(ctx context.Context, deadline time.Time, delay time.Duration) error { + remaining := time.Until(deadline) + if remaining <= 0 { + return context.DeadlineExceeded + } + if delay > remaining { + delay = remaining + } + return sleepWithContext(ctx, delay) +} diff --git a/internal/runtime/kubernetes/wait_test.go b/internal/runtime/kubernetes/wait_test.go new file mode 100644 index 00000000..dc61dbaa --- /dev/null +++ b/internal/runtime/kubernetes/wait_test.go @@ -0,0 +1,61 @@ +package kubernetes + +import ( + "context" + "errors" + "testing" + "time" +) + +func TestJobPollIntervalTiers(t *testing.T) { + tests := []struct { + name string + elapsed time.Duration + want time.Duration + }{ + {name: "before thirty minutes", elapsed: 29*time.Minute + 59*time.Second, want: 5 * time.Second}, + {name: "at thirty minutes", elapsed: 30 * time.Minute, want: time.Minute}, + {name: "before one hour", elapsed: 59*time.Minute + 59*time.Second, want: time.Minute}, + {name: "at one hour", elapsed: time.Hour, want: 2 * time.Minute}, + {name: "before two hours", elapsed: 2*time.Hour - time.Second, want: 2 * time.Minute}, + {name: "at two hours", elapsed: 2 * time.Hour, want: 5 * time.Minute}, + } + + for _, test := range tests { + t.Run(test.name, func(t 
*testing.T) { + if got := jobPollInterval(test.elapsed); got != test.want { + t.Fatalf("jobPollInterval(%s) = %s, want %s", test.elapsed, got, test.want) + } + }) + } +} + +func TestCappedBackoffIncreasesAndCaps(t *testing.T) { + backoff := newCappedBackoff(time.Second, 2*time.Second) + if got := backoff.Next(); got != time.Second { + t.Fatalf("first delay = %s, want 1s", got) + } + if got := backoff.Next(); got != 1250*time.Millisecond { + t.Fatalf("second delay = %s, want 1.25s", got) + } + for i := 0; i < 10; i++ { + _ = backoff.Next() + } + if got := backoff.Next(); got != 2*time.Second { + t.Fatalf("capped delay = %s, want 2s", got) + } +} + +func TestSleepWithContextReturnsOnCancellation(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + startedAt := time.Now() + err := sleepWithContext(ctx, time.Hour) + if !errors.Is(err, context.Canceled) { + t.Fatalf("sleepWithContext error = %v, want context.Canceled", err) + } + if elapsed := time.Since(startedAt); elapsed > 100*time.Millisecond { + t.Fatalf("sleepWithContext returned after %s, want immediate cancellation", elapsed) + } +} diff --git a/internal/runtime/runtime_test.go b/internal/runtime/runtime_test.go new file mode 100644 index 00000000..5dcdec17 --- /dev/null +++ b/internal/runtime/runtime_test.go @@ -0,0 +1,109 @@ +package runtime_test + +import ( + "os" + "path/filepath" + "reflect" + "testing" + + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/runtime/docker" +) + +func TestDockerRunCommandBuildsCanonicalMounts(t *testing.T) { + dataRoot := t.TempDir() + procedure := &domain.Procedure{ + Image: "alpine:3.20", + Command: []string{"sh", "-c", "echo ok"}, + WorkingDir: "/cache", + Env: map[string]string{ + "B": "two", + "A": "one", + }, + ExpectedPorts: []domain.ExpectedPort{{Name: "http"}}, + Mounts: []domain.Mount{{Path: "/cache", SubPath: "cache"}}, + } + + spec, err := docker.BuildContainerSpec("start", 
procedure, dataRoot, []domain.Port{{Name: "http", Port: 8080, Protocol: "http"}}) + if err != nil { + t.Fatal(err) + } + + if spec.Image != "alpine:3.20" { + t.Fatalf("unexpected image: %s", spec.Image) + } + if !reflect.DeepEqual(spec.Command, []string{"sh", "-c", "echo ok"}) { + t.Fatalf("unexpected command: %#v", spec.Command) + } + if spec.WorkingDir != "/cache" { + t.Fatalf("unexpected working dir: %s", spec.WorkingDir) + } + if !reflect.DeepEqual(spec.Env, []string{"A=one", "B=two"}) { + t.Fatalf("unexpected env: %#v", spec.Env) + } + expectedBinds := []string{ + filepath.Join(dataRoot, "data", "cache") + ":/cache", + } + if !reflect.DeepEqual(spec.Binds, expectedBinds) { + t.Fatalf("unexpected binds:\nexpected: %#v\nactual: %#v", expectedBinds, spec.Binds) + } + if len(spec.PortBindings) != 1 { + t.Fatalf("expected one port binding, got %#v", spec.PortBindings) + } + + if _, err := os.Stat(filepath.Join(dataRoot, "data", "cache")); err != nil { + t.Fatalf("expected mount subpath to be created: %v", err) + } +} + +func TestDockerRunCommandDefaultsMountSubPathToDataRoot(t *testing.T) { + dataRoot := t.TempDir() + spec, err := docker.BuildContainerSpec("start", &domain.Procedure{ + Image: "alpine:3.20", + Mounts: []domain.Mount{{Path: "/server"}}, + Command: []string{"true"}, + }, dataRoot, nil) + if err != nil { + t.Fatal(err) + } + expectedBinds := []string{filepath.Join(dataRoot, "data") + ":/server"} + if !reflect.DeepEqual(spec.Binds, expectedBinds) { + t.Fatalf("unexpected binds:\nexpected: %#v\nactual: %#v", expectedBinds, spec.Binds) + } +} + +func TestDockerBuildContainerSpecSupportsTTY(t *testing.T) { + dataRoot := t.TempDir() + + spec, err := docker.BuildContainerSpec("build.0", &domain.Procedure{ + Image: domain.DefaultExecImage, + Command: []string{"bash", "-lc", "echo ok"}, + WorkingDir: "/work", + TTY: true, + }, dataRoot, nil) + if err != nil { + t.Fatal(err) + } + if !spec.TTY { + t.Fatal("expected TTY enabled") + } + if spec.Image != 
domain.DefaultExecImage { + t.Fatalf("unexpected image: %s", spec.Image) + } +} + +func TestDockerReadScrollFile(t *testing.T) { + scrollRoot := t.TempDir() + want := []byte("name: test\n") + if err := os.WriteFile(filepath.Join(scrollRoot, "scroll.yaml"), want, 0644); err != nil { + t.Fatal(err) + } + backend := &docker.Backend{} + got, err := backend.ReadScrollFile(scrollRoot) + if err != nil { + t.Fatal(err) + } + if string(got) != string(want) { + t.Fatalf("scroll yaml = %q, want %q", got, want) + } +} diff --git a/internal/signals/process_shutdown.go b/internal/signals/process_shutdown.go deleted file mode 100644 index d04ed5b1..00000000 --- a/internal/signals/process_shutdown.go +++ /dev/null @@ -1,158 +0,0 @@ -package signals - -import ( - "context" - "fmt" - "maps" - "os" - "os/signal" - "slices" - "syscall" - "time" - - "github.com/gofiber/fiber/v2" - "github.com/highcard-dev/daemon/internal/core/ports" - "github.com/highcard-dev/daemon/internal/utils/logger" - processutil "github.com/shirou/gopsutil/process" - "go.uber.org/zap" -) - -type SignalHandler struct { - SigC chan os.Signal - queueManager ports.QueueManagerInterface - processManager ports.ProcessManagerInterface - app *fiber.App - waitSeconds int -} - -func NewSignalHandler(ctx context.Context, queueManager ports.QueueManagerInterface, processManager ports.ProcessManagerInterface, app *fiber.App, waitSeconds int) *SignalHandler { - sh := &SignalHandler{ - SigC: make(chan os.Signal, 1), - queueManager: queueManager, - processManager: processManager, - app: app, - waitSeconds: waitSeconds, - } - - sh.SetupSignals(ctx) - - return sh -} - -func (sh *SignalHandler) SetApp(app *fiber.App) { - sh.app = app -} - -func (sh *SignalHandler) SetupSignals(ctx context.Context) { - - signal.Notify(sh.SigC, - syscall.SIGHUP, - syscall.SIGINT, - syscall.SIGTERM, - syscall.SIGQUIT, - os.Interrupt, - // syscall.SIGCHLD, - ) - - go func() { - var s os.Signal - select { - case s = <-sh.SigC: - 
logger.Log().Info("Received shudown signal", zap.String("signal", s.String())) - case <-ctx.Done(): - logger.Log().Info("Context done") - //debug timeout for testing - //case <-time.After(time.Duration(25) * time.Second): - // s = syscall.SIGTERM - go sh.queueManager.AddShutdownItem("stop") - } - - sh.GracefulShutdown() - }() -} - -func (sh *SignalHandler) ExtendedShutdownRoutine() { - - shudownDone := make(chan struct{}) - go func() { - waitForProcessesToStop(sh.processManager) - shudownDone <- struct{}{} - }() - - //TODO: refactor this - done := false - go func() { - //wait for some time to await the sigterm - <-time.After(time.Duration(sh.waitSeconds) * time.Second) - go sh.queueManager.AddShutdownItem("stop") - <-time.After(time.Duration(sh.waitSeconds) * time.Second) - if done { - return - } - go shutdownRoutine(sh.processManager, syscall.SIGTERM) - <-time.After(time.Duration(sh.waitSeconds) * time.Second) - if done { - return - } - go shutdownRoutine(sh.processManager, syscall.SIGKILL) - }() - - <-shudownDone - done = true -} - -func (sh *SignalHandler) GracefulShutdown() { - - logger.Log().Info("Graceful shutdown started") - - logger.Log().Info("Shutdown Routine") - sh.ExtendedShutdownRoutine() - - logger.Log().Info("Shutting down app") - if sh.app != nil { - sh.app.Shutdown() - } - - logger.Log().Info("Shutdown done") - -} - -func waitForProcessesToStop(processManager ports.ProcessManagerInterface) { - for { - if len(processManager.GetRunningProcesses()) == 0 { - logger.Log().Info("No running processes") - break - } - runningPorcesses := processManager.GetRunningProcesses() - keys := slices.Collect(maps.Keys(runningPorcesses)) - - logger.Log().Info(fmt.Sprintf("Waiting for %d processes to stop...", len(runningPorcesses)), zap.Strings("processes", keys)) - time.Sleep(time.Second) - } -} - -func shutdownRoutine(processManager ports.ProcessManagerInterface, signal syscall.Signal) { - - logger.Log().Info("Still not done, killing all processes with signal", 
zap.String("signal", signal.String())) - for _, process := range processManager.GetRunningProcesses() { - p, err := processutil.NewProcess(int32(process.Status().Pid)) - if err != nil { - break - } - running, _ := p.IsRunning() - if running { - //pgid, err := syscall.Getpgid(process.Status().Pid) - //if err == nil { - // syscall.Kill(-pgid, signal) // note the minus sign - //} else { - //normal stop without pgid - process.Cmd.Process.Signal(signal) - //} - } - } -} - -func (sh *SignalHandler) Stop() { - sh.GracefulShutdown() - -} diff --git a/internal/utils/artifact.go b/internal/utils/artifact.go index b9aaad8f..f5d4b0e2 100644 --- a/internal/utils/artifact.go +++ b/internal/utils/artifact.go @@ -3,12 +3,11 @@ package utils import "strings" func SplitArtifact(url string) (string, string) { - parts := strings.Split(url, ":") - if len(parts) != 2 { + repo, ref, kind := ParseArtifactRef(url) + if kind != ArtifactRefKindTag { return "", "" } - repo, tag := parts[0], parts[1] - return repo, tag + return repo, ref } type ArtifactRefKind string diff --git a/internal/utils/artifact_test.go b/internal/utils/artifact_test.go index 4137131d..a27e87ca 100644 --- a/internal/utils/artifact_test.go +++ b/internal/utils/artifact_test.go @@ -78,3 +78,10 @@ func TestParseArtifactRef(t *testing.T) { } } +func TestSplitArtifactSupportsRegistryPort(t *testing.T) { + repo, tag := SplitArtifact("localhost:5001/container-lab:1.0") + + if repo != "localhost:5001/container-lab" || tag != "1.0" { + t.Fatalf("SplitArtifact repo=%q tag=%q, want localhost:5001/container-lab 1.0", repo, tag) + } +} diff --git a/internal/utils/logger/logger.go b/internal/utils/logger/logger.go index eefa7177..8ec95db1 100644 --- a/internal/utils/logger/logger.go +++ b/internal/utils/logger/logger.go @@ -28,7 +28,6 @@ const ( LogContextScroll = "scroll" LogContextProcedure = "scroll-procedure" LogContextMonitor = "monitor" - LogContextProcess = "scroll-process" LogContextWebSocket = "web-socket" LogContextUpdate 
= "update" LogContextView = "view" diff --git a/internal/utils/logger/plugin_logger.go b/internal/utils/logger/plugin_logger.go deleted file mode 100644 index d7bd77bd..00000000 --- a/internal/utils/logger/plugin_logger.go +++ /dev/null @@ -1,126 +0,0 @@ -//taken from: https://github.com/serverless/event-gateway/blob/92b773f27dc856fa3db08c772963cb8c64f1ce89/plugin/logger.go (modified) -package logger - -import ( - "fmt" - "io" - "io/ioutil" - "log" - - hclog "github.com/hashicorp/go-hclog" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -type Level = hclog.Level - -// Hclog2ZapLogger implements Hashicorp's hclog.Logger interface using Uber's zap.Logger. It's a workaround for plugin -// system. go-plugin doesn't support other logger than hclog. This logger implements only methods used by the go-plugin. -type Hclog2ZapLogger struct { - Zap *zap.Logger -} - -// Trace implementation. -func (l Hclog2ZapLogger) Trace(msg string, args ...interface{}) {} - -// Debug implementation. -func (l Hclog2ZapLogger) Debug(msg string, args ...interface{}) { - l.Zap.Debug(msg, argsToFields(args...)...) -} - -// Info implementation. -func (l Hclog2ZapLogger) Info(msg string, args ...interface{}) { - l.Zap.Info(msg, argsToFields(args...)...) -} - -// Log logs messages with four simplified levels - Debug,Warn,Error and Info as a default. -func (l Hclog2ZapLogger) Log(lvl Level, msg string, args ...interface{}) { - switch lvl { - case hclog.Debug: - l.Debug(msg, args...) - case hclog.Warn: - l.Warn(msg, args...) - case hclog.Error: - l.Error(msg, args...) - case hclog.DefaultLevel, hclog.Info, hclog.NoLevel, hclog.Off, hclog.Trace: - l.Info(msg, args...) - } -} - -// Info implementation. -func (l Hclog2ZapLogger) Name() string { - return "hclog2zap" -} - -// Warn implementation. -func (l Hclog2ZapLogger) Warn(msg string, args ...interface{}) { - l.Zap.Warn(msg, argsToFields(args...)...) -} - -// Error implementation. 
-func (l Hclog2ZapLogger) Error(msg string, args ...interface{}) { - l.Zap.Error(msg, argsToFields(args...)...) -} - -// IsTrace implementation. -func (l Hclog2ZapLogger) IsTrace() bool { return false } - -// IsDebug implementation. -func (l Hclog2ZapLogger) IsDebug() bool { return false } - -// IsInfo implementation. -func (l Hclog2ZapLogger) IsInfo() bool { return false } - -// IsWarn implementation. -func (l Hclog2ZapLogger) IsWarn() bool { return false } - -// IsError implementation. -func (l Hclog2ZapLogger) IsError() bool { return false } - -// GetLevel implementation. -func (l Hclog2ZapLogger) GetLevel() hclog.Level { return hclog.Level(0) } - -// GetLevel implementation. -func (l Hclog2ZapLogger) ImpliedArgs() []interface{} { return nil } - -// StandardWriter returns os.Stderr as io.Writer. -func (l Hclog2ZapLogger) StandardWriter(opts *hclog.StandardLoggerOptions) io.Writer { - return hclog.DefaultOutput -} - -// With implementation. -func (l Hclog2ZapLogger) With(args ...interface{}) hclog.Logger { - return Hclog2ZapLogger{Zap: l.Zap.With(argsToFields(args...)...)} -} - -// Named implementation. -func (l Hclog2ZapLogger) Named(name string) hclog.Logger { - return Hclog2ZapLogger{Zap: l.Zap.Named(name)} -} - -// ResetNamed implementation. -func (l Hclog2ZapLogger) ResetNamed(name string) hclog.Logger { - // no need to implement that as go-plugin doesn't use this method. - return Hclog2ZapLogger{} -} - -// SetLevel implementation. -func (l Hclog2ZapLogger) SetLevel(level hclog.Level) { - // no need to implement that as go-plugin doesn't use this method. -} - -// StandardLogger implementation. -func (l Hclog2ZapLogger) StandardLogger(opts *hclog.StandardLoggerOptions) *log.Logger { - // no need to implement that as go-plugin doesn't use this method. 
- return log.New(ioutil.Discard, "", 0) -} - -func argsToFields(args ...interface{}) []zapcore.Field { - fields := []zapcore.Field{} - for i := 0; i < len(args); i += 2 { - fields = append(fields, zap.String(args[i].(string), fmt.Sprintf("%v", args[i+1]))) - } - - return fields -} diff --git a/internal/utils/runtime_socket.go b/internal/utils/runtime_socket.go new file mode 100644 index 00000000..c44563c0 --- /dev/null +++ b/internal/utils/runtime_socket.go @@ -0,0 +1,22 @@ +package utils + +import ( + "fmt" + "os" + "path/filepath" +) + +func DefaultRuntimeSocketPath() string { + if runtimeDir := os.Getenv("XDG_RUNTIME_DIR"); runtimeDir != "" { + return filepath.Join(runtimeDir, "druid", "runtime.sock") + } + return filepath.Join(os.TempDir(), fmt.Sprintf("druid-%d-runtime.sock", os.Getuid())) +} + +func DefaultRuntimeStateDir() (string, error) { + home, err := os.UserHomeDir() + if err != nil { + return "", err + } + return filepath.Join(home, ".druid", "runtime"), nil +} diff --git a/plugin/config.go b/plugin/config.go deleted file mode 100644 index 2d1019b2..00000000 --- a/plugin/config.go +++ /dev/null @@ -1,23 +0,0 @@ -package plugin - -import "gopkg.in/yaml.v2" - -func GetConfig[T interface{}](pluginName string, scrollConfigRawYaml []byte) (T, error) { - var Config T - - var scrollConfig map[string]interface{} - - yaml.Unmarshal(scrollConfigRawYaml, &scrollConfig) - - rcon := scrollConfig[pluginName] - - b, err := yaml.Marshal(rcon) - - if err != nil { - return Config, err - } - - yaml.Unmarshal(b, &Config) - - return Config, nil -} diff --git a/plugin/environment.go b/plugin/environment.go deleted file mode 100644 index 6ac04b26..00000000 --- a/plugin/environment.go +++ /dev/null @@ -1,22 +0,0 @@ -package plugin - -import ( - "fmt" -) - -type Environment struct { - Address string - Password string -} - -func NewPluginEnvironment(cwd string, password string, port int, host string) (*Environment, error) { - environment := &Environment{} - if host == "" { - 
host = "localhost" - } - - environment.Address = fmt.Sprintf("%s:%d", host, port) - environment.Password = password - - return environment, nil -} diff --git a/plugin/grpc.go b/plugin/grpc.go deleted file mode 100644 index ad73abc0..00000000 --- a/plugin/grpc.go +++ /dev/null @@ -1,115 +0,0 @@ -package plugin - -import ( - plugin "github.com/hashicorp/go-plugin" - "github.com/highcard-dev/daemon/plugin/proto" - "golang.org/x/net/context" - "google.golang.org/grpc" -) - -// GRPCClient is an implementation of KV that talks over RPC. -type GRPCClient struct { - broker *plugin.GRPCBroker - client proto.PluginClient -} - -func (m *GRPCClient) GetModes() ([]*proto.GetModeResponse_Mode, error) { - resp, err := m.client.GetModes(context.Background(), &proto.EmptyPluginResponse{}) - if err != nil { - return nil, err - } - - return resp.Modes, nil -} - -func (m *GRPCClient) RunProcedure(mode string, data string) (string, error) { - resp, err := m.client.RunProcedure(context.Background(), &proto.ProcedureRequest{Mode: mode, Data: data}) - if err != nil { - return "", err - } - - return resp.Data, nil -} - -func (m *GRPCClient) Init(pluginMap map[string]string, a DruidDaemon, cwd string, config string) error { - addHelperServer := &GRPCAddHelperServer{Impl: a} - - var s *grpc.Server - serverFunc := func(opts []grpc.ServerOption) *grpc.Server { - s = grpc.NewServer(opts...) - proto.RegisterDaemonServiceServer(s, addHelperServer) - - return s - } - - brokerID := m.broker.NextId() - go m.broker.AcceptAndServe(brokerID, serverFunc) - - _, err := m.client.Init(context.Background(), &proto.InitRequest{PluginConfig: pluginMap, DruidServer: brokerID, Cwd: cwd, ScrollConfig: config}) - if err != nil { - return err - } - - return nil -} - -// Here is the gRPC server that GRPCClient talks to. 
-type GRPCServer struct { - proto.PluginServer - - Impl DruidPluginInterface - broker *plugin.GRPCBroker -} - -func (m *GRPCServer) GetModes(ctx context.Context, req *proto.EmptyPluginResponse) (*proto.GetModeResponse, error) { - v, err := m.Impl.GetModes() - return &proto.GetModeResponse{Modes: v}, err - -} - -func (m *GRPCServer) RunProcedure(ctx context.Context, req *proto.ProcedureRequest) (*proto.ProcedureResponse, error) { - v, err := m.Impl.RunProcedure(req.Mode, req.Data) - if err != nil { - return nil, err - } - return &proto.ProcedureResponse{Data: v}, nil - -} - -func (m *GRPCServer) Init(ctx context.Context, req *proto.InitRequest) (*proto.EmptyPluginResponse, error) { - - conn, err := m.broker.Dial(req.DruidServer) - if err != nil { - return &proto.EmptyPluginResponse{}, err - } - - a := &GRPCAddHelperClient{proto.NewDaemonServiceClient(conn)} - - err = m.Impl.Init(req.PluginConfig, a, req.Cwd, req.ScrollConfig) - return &proto.EmptyPluginResponse{}, err - -} - -// GRPCClient is an implementation of KV that talks over RPC. -type GRPCAddHelperClient struct{ client proto.DaemonServiceClient } - -func (m *GRPCAddHelperClient) NotifyConsole(mode string, data string) error { - _, err := m.client.NotifyConsole(context.Background(), &proto.ConsoleNotification{ - Mode: mode, - Data: data, - }) - return err -} - -// Here is the gRPC server that GRPCClient talks to. 
-type GRPCAddHelperServer struct { - proto.DaemonServiceServer - // This is the real implementation - Impl DruidDaemon -} - -func (m *GRPCAddHelperServer) NotifyConsole(ctx context.Context, req *proto.ConsoleNotification) (*proto.EmptyDaemonResponse, error) { - err := m.Impl.NotifyConsole(req.Mode, req.Data) - - return &proto.EmptyDaemonResponse{}, err -} diff --git a/plugin/interface.go b/plugin/interface.go deleted file mode 100644 index 7a31ed54..00000000 --- a/plugin/interface.go +++ /dev/null @@ -1,43 +0,0 @@ -package plugin - -import ( - "context" - - "github.com/hashicorp/go-plugin" - "github.com/highcard-dev/daemon/plugin/proto" - "google.golang.org/grpc" -) - -// DruidDaemon is the interface that we're exposing as a plugin. -type DruidDaemon interface { - NotifyConsole(string, string) error -} - -// DruidPluginInterface is the interface that we're exposing as a plugin. -type DruidPluginInterface interface { - GetModes() ([]*proto.GetModeResponse_Mode, error) - RunProcedure(string, string) (string, error) - Init(map[string]string, DruidDaemon, string, string) error -} - -type DruidRpcPlugin struct { - plugin.NetRPCUnsupportedPlugin - Impl DruidPluginInterface -} - -func (p *DruidRpcPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { - proto.RegisterPluginServer(s, &GRPCServer{ - Impl: p.Impl, - broker: broker, - }) - return nil -} - -func (p *DruidRpcPlugin) GRPCClient(_ context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { - return &GRPCClient{ - client: proto.NewPluginClient(c), - broker: broker, - }, nil -} - -var _ plugin.GRPCPlugin = &DruidRpcPlugin{} diff --git a/plugin/proto/daemon_service.pb.go b/plugin/proto/daemon_service.pb.go deleted file mode 100644 index dddcec8f..00000000 --- a/plugin/proto/daemon_service.pb.go +++ /dev/null @@ -1,214 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.8 -// source: plugin/proto/daemon_service.proto - -package proto - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type EmptyDaemonResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *EmptyDaemonResponse) Reset() { - *x = EmptyDaemonResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_plugin_proto_daemon_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EmptyDaemonResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EmptyDaemonResponse) ProtoMessage() {} - -func (x *EmptyDaemonResponse) ProtoReflect() protoreflect.Message { - mi := &file_plugin_proto_daemon_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EmptyDaemonResponse.ProtoReflect.Descriptor instead. 
-func (*EmptyDaemonResponse) Descriptor() ([]byte, []int) { - return file_plugin_proto_daemon_service_proto_rawDescGZIP(), []int{0} -} - -type ConsoleNotification struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Mode string `protobuf:"bytes,1,opt,name=mode,proto3" json:"mode,omitempty"` - Data string `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` -} - -func (x *ConsoleNotification) Reset() { - *x = ConsoleNotification{} - if protoimpl.UnsafeEnabled { - mi := &file_plugin_proto_daemon_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ConsoleNotification) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ConsoleNotification) ProtoMessage() {} - -func (x *ConsoleNotification) ProtoReflect() protoreflect.Message { - mi := &file_plugin_proto_daemon_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ConsoleNotification.ProtoReflect.Descriptor instead. 
-func (*ConsoleNotification) Descriptor() ([]byte, []int) { - return file_plugin_proto_daemon_service_proto_rawDescGZIP(), []int{1} -} - -func (x *ConsoleNotification) GetMode() string { - if x != nil { - return x.Mode - } - return "" -} - -func (x *ConsoleNotification) GetData() string { - if x != nil { - return x.Data - } - return "" -} - -var File_plugin_proto_daemon_service_proto protoreflect.FileDescriptor - -var file_plugin_proto_daemon_service_proto_rawDesc = []byte{ - 0x0a, 0x21, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x64, - 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x15, 0x0a, 0x13, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x44, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x3d, 0x0a, 0x13, 0x43, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, - 0x32, 0x58, 0x0a, 0x0d, 0x44, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x12, 0x47, 0x0a, 0x0d, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x73, 0x6f, - 0x6c, 0x65, 0x12, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x6f, - 0x6c, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x1a, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x44, 0x61, 0x65, 0x6d, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x1f, 0x5a, 0x1d, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x69, 0x67, 0x68, 0x63, 0x61, 0x72, - 0x64, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var ( - file_plugin_proto_daemon_service_proto_rawDescOnce sync.Once - file_plugin_proto_daemon_service_proto_rawDescData = file_plugin_proto_daemon_service_proto_rawDesc -) - -func file_plugin_proto_daemon_service_proto_rawDescGZIP() []byte { - file_plugin_proto_daemon_service_proto_rawDescOnce.Do(func() { - file_plugin_proto_daemon_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_plugin_proto_daemon_service_proto_rawDescData) - }) - return file_plugin_proto_daemon_service_proto_rawDescData -} - -var file_plugin_proto_daemon_service_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_plugin_proto_daemon_service_proto_goTypes = []interface{}{ - (*EmptyDaemonResponse)(nil), // 0: proto.EmptyDaemonResponse - (*ConsoleNotification)(nil), // 1: proto.ConsoleNotification -} -var file_plugin_proto_daemon_service_proto_depIdxs = []int32{ - 1, // 0: proto.DaemonService.NotifyConsole:input_type -> proto.ConsoleNotification - 0, // 1: proto.DaemonService.NotifyConsole:output_type -> proto.EmptyDaemonResponse - 1, // [1:2] is the sub-list for method output_type - 0, // [0:1] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_plugin_proto_daemon_service_proto_init() } -func file_plugin_proto_daemon_service_proto_init() { - if File_plugin_proto_daemon_service_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_plugin_proto_daemon_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EmptyDaemonResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_plugin_proto_daemon_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*ConsoleNotification); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_plugin_proto_daemon_service_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_plugin_proto_daemon_service_proto_goTypes, - DependencyIndexes: file_plugin_proto_daemon_service_proto_depIdxs, - MessageInfos: file_plugin_proto_daemon_service_proto_msgTypes, - }.Build() - File_plugin_proto_daemon_service_proto = out.File - file_plugin_proto_daemon_service_proto_rawDesc = nil - file_plugin_proto_daemon_service_proto_goTypes = nil - file_plugin_proto_daemon_service_proto_depIdxs = nil -} diff --git a/plugin/proto/daemon_service.proto b/plugin/proto/daemon_service.proto deleted file mode 100644 index a330b09a..00000000 --- a/plugin/proto/daemon_service.proto +++ /dev/null @@ -1,15 +0,0 @@ -syntax = "proto3"; -package proto; - -option go_package = "github.com/highcard-dev/proto"; - -message EmptyDaemonResponse {} - -message ConsoleNotification { - string mode = 1; - string data = 2; -} - -service DaemonService { - rpc NotifyConsole(ConsoleNotification) returns (EmptyDaemonResponse); -} diff --git a/plugin/proto/daemon_service_grpc.pb.go b/plugin/proto/daemon_service_grpc.pb.go deleted file mode 100644 index 483ba735..00000000 --- a/plugin/proto/daemon_service_grpc.pb.go +++ /dev/null @@ -1,105 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
-// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.8 -// source: plugin/proto/daemon_service.proto - -package proto - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// DaemonServiceClient is the client API for DaemonService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type DaemonServiceClient interface { - NotifyConsole(ctx context.Context, in *ConsoleNotification, opts ...grpc.CallOption) (*EmptyDaemonResponse, error) -} - -type daemonServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewDaemonServiceClient(cc grpc.ClientConnInterface) DaemonServiceClient { - return &daemonServiceClient{cc} -} - -func (c *daemonServiceClient) NotifyConsole(ctx context.Context, in *ConsoleNotification, opts ...grpc.CallOption) (*EmptyDaemonResponse, error) { - out := new(EmptyDaemonResponse) - err := c.cc.Invoke(ctx, "/proto.DaemonService/NotifyConsole", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// DaemonServiceServer is the server API for DaemonService service. -// All implementations must embed UnimplementedDaemonServiceServer -// for forward compatibility -type DaemonServiceServer interface { - NotifyConsole(context.Context, *ConsoleNotification) (*EmptyDaemonResponse, error) - mustEmbedUnimplementedDaemonServiceServer() -} - -// UnimplementedDaemonServiceServer must be embedded to have forward compatible implementations. 
-type UnimplementedDaemonServiceServer struct { -} - -func (UnimplementedDaemonServiceServer) NotifyConsole(context.Context, *ConsoleNotification) (*EmptyDaemonResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NotifyConsole not implemented") -} -func (UnimplementedDaemonServiceServer) mustEmbedUnimplementedDaemonServiceServer() {} - -// UnsafeDaemonServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to DaemonServiceServer will -// result in compilation errors. -type UnsafeDaemonServiceServer interface { - mustEmbedUnimplementedDaemonServiceServer() -} - -func RegisterDaemonServiceServer(s grpc.ServiceRegistrar, srv DaemonServiceServer) { - s.RegisterService(&DaemonService_ServiceDesc, srv) -} - -func _DaemonService_NotifyConsole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ConsoleNotification) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DaemonServiceServer).NotifyConsole(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.DaemonService/NotifyConsole", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DaemonServiceServer).NotifyConsole(ctx, req.(*ConsoleNotification)) - } - return interceptor(ctx, in, info, handler) -} - -// DaemonService_ServiceDesc is the grpc.ServiceDesc for DaemonService service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var DaemonService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "proto.DaemonService", - HandlerType: (*DaemonServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "NotifyConsole", - Handler: _DaemonService_NotifyConsole_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "plugin/proto/daemon_service.proto", -} diff --git a/plugin/proto/plugin_service.pb.go b/plugin/proto/plugin_service.pb.go deleted file mode 100644 index 1115f022..00000000 --- a/plugin/proto/plugin_service.pb.go +++ /dev/null @@ -1,526 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.8 -// source: plugin/proto/plugin_service.proto - -package proto - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type GetModeResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Modes []*GetModeResponse_Mode `protobuf:"bytes,1,rep,name=modes,proto3" json:"modes,omitempty"` -} - -func (x *GetModeResponse) Reset() { - *x = GetModeResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_plugin_proto_plugin_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetModeResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetModeResponse) ProtoMessage() {} - -func (x *GetModeResponse) ProtoReflect() protoreflect.Message { - mi := &file_plugin_proto_plugin_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetModeResponse.ProtoReflect.Descriptor instead. 
-func (*GetModeResponse) Descriptor() ([]byte, []int) { - return file_plugin_proto_plugin_service_proto_rawDescGZIP(), []int{0} -} - -func (x *GetModeResponse) GetModes() []*GetModeResponse_Mode { - if x != nil { - return x.Modes - } - return nil -} - -type InitRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PluginConfig map[string]string `protobuf:"bytes,1,rep,name=plugin_config,json=pluginConfig,proto3" json:"plugin_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - DruidServer uint32 `protobuf:"varint,2,opt,name=druid_server,json=druidServer,proto3" json:"druid_server,omitempty"` - Cwd string `protobuf:"bytes,3,opt,name=cwd,proto3" json:"cwd,omitempty"` - ScrollConfig string `protobuf:"bytes,4,opt,name=scroll_config,json=scrollConfig,proto3" json:"scroll_config,omitempty"` -} - -func (x *InitRequest) Reset() { - *x = InitRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_plugin_proto_plugin_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *InitRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*InitRequest) ProtoMessage() {} - -func (x *InitRequest) ProtoReflect() protoreflect.Message { - mi := &file_plugin_proto_plugin_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use InitRequest.ProtoReflect.Descriptor instead. 
-func (*InitRequest) Descriptor() ([]byte, []int) { - return file_plugin_proto_plugin_service_proto_rawDescGZIP(), []int{1} -} - -func (x *InitRequest) GetPluginConfig() map[string]string { - if x != nil { - return x.PluginConfig - } - return nil -} - -func (x *InitRequest) GetDruidServer() uint32 { - if x != nil { - return x.DruidServer - } - return 0 -} - -func (x *InitRequest) GetCwd() string { - if x != nil { - return x.Cwd - } - return "" -} - -func (x *InitRequest) GetScrollConfig() string { - if x != nil { - return x.ScrollConfig - } - return "" -} - -type ProcedureRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Mode string `protobuf:"bytes,1,opt,name=mode,proto3" json:"mode,omitempty"` - Data string `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` -} - -func (x *ProcedureRequest) Reset() { - *x = ProcedureRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_plugin_proto_plugin_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ProcedureRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcedureRequest) ProtoMessage() {} - -func (x *ProcedureRequest) ProtoReflect() protoreflect.Message { - mi := &file_plugin_proto_plugin_service_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcedureRequest.ProtoReflect.Descriptor instead. 
-func (*ProcedureRequest) Descriptor() ([]byte, []int) { - return file_plugin_proto_plugin_service_proto_rawDescGZIP(), []int{2} -} - -func (x *ProcedureRequest) GetMode() string { - if x != nil { - return x.Mode - } - return "" -} - -func (x *ProcedureRequest) GetData() string { - if x != nil { - return x.Data - } - return "" -} - -type ProcedureResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Data string `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` -} - -func (x *ProcedureResponse) Reset() { - *x = ProcedureResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_plugin_proto_plugin_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ProcedureResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcedureResponse) ProtoMessage() {} - -func (x *ProcedureResponse) ProtoReflect() protoreflect.Message { - mi := &file_plugin_proto_plugin_service_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcedureResponse.ProtoReflect.Descriptor instead. 
-func (*ProcedureResponse) Descriptor() ([]byte, []int) { - return file_plugin_proto_plugin_service_proto_rawDescGZIP(), []int{3} -} - -func (x *ProcedureResponse) GetData() string { - if x != nil { - return x.Data - } - return "" -} - -type EmptyPluginResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *EmptyPluginResponse) Reset() { - *x = EmptyPluginResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_plugin_proto_plugin_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EmptyPluginResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EmptyPluginResponse) ProtoMessage() {} - -func (x *EmptyPluginResponse) ProtoReflect() protoreflect.Message { - mi := &file_plugin_proto_plugin_service_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EmptyPluginResponse.ProtoReflect.Descriptor instead. 
-func (*EmptyPluginResponse) Descriptor() ([]byte, []int) { - return file_plugin_proto_plugin_service_proto_rawDescGZIP(), []int{4} -} - -type GetModeResponse_Mode struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Standalone bool `protobuf:"varint,1,opt,name=standalone,proto3" json:"standalone,omitempty"` - Mode string `protobuf:"bytes,2,opt,name=mode,proto3" json:"mode,omitempty"` -} - -func (x *GetModeResponse_Mode) Reset() { - *x = GetModeResponse_Mode{} - if protoimpl.UnsafeEnabled { - mi := &file_plugin_proto_plugin_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetModeResponse_Mode) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetModeResponse_Mode) ProtoMessage() {} - -func (x *GetModeResponse_Mode) ProtoReflect() protoreflect.Message { - mi := &file_plugin_proto_plugin_service_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetModeResponse_Mode.ProtoReflect.Descriptor instead. 
-func (*GetModeResponse_Mode) Descriptor() ([]byte, []int) { - return file_plugin_proto_plugin_service_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *GetModeResponse_Mode) GetStandalone() bool { - if x != nil { - return x.Standalone - } - return false -} - -func (x *GetModeResponse_Mode) GetMode() string { - if x != nil { - return x.Mode - } - return "" -} - -var File_plugin_proto_plugin_service_proto protoreflect.FileDescriptor - -var file_plugin_proto_plugin_service_proto_rawDesc = []byte{ - 0x0a, 0x21, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x80, 0x01, 0x0a, 0x0f, 0x47, - 0x65, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, - 0x0a, 0x05, 0x6d, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x6d, 0x6f, 0x64, 0x65, - 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x74, 0x61, - 0x6e, 0x64, 0x61, 0x6c, 0x6f, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x73, - 0x74, 0x61, 0x6e, 0x64, 0x61, 0x6c, 0x6f, 0x6e, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x22, 0xf3, 0x01, - 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, - 0x0d, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x6e, 0x69, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x72, 0x75, 0x69, - 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, - 0x64, 0x72, 0x75, 0x69, 0x64, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x63, - 0x77, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x63, 0x77, 0x64, 0x12, 0x23, 0x0a, - 0x0d, 0x73, 0x63, 0x72, 0x6f, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x63, 0x72, 0x6f, 0x6c, 0x6c, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x1a, 0x3f, 0x0a, 0x11, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0x3a, 0x0a, 0x10, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x64, 0x75, 0x72, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, - 0x27, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x64, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x15, 0x0a, 0x13, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, - 0xc3, 0x01, 0x0a, 0x06, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x3e, 0x0a, 0x08, 0x47, 0x65, - 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x1a, 0x2e, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x1a, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x6f, - 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x04, 0x49, 0x6e, - 0x69, 0x74, 0x12, 0x12, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0c, 0x52, 0x75, 0x6e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x64, 0x75, - 0x72, 0x65, 0x12, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, - 0x64, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x64, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x69, 0x67, 0x68, 0x63, 0x61, 0x72, 0x64, 0x2d, 0x64, 0x65, 0x76, - 0x2f, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_plugin_proto_plugin_service_proto_rawDescOnce sync.Once - file_plugin_proto_plugin_service_proto_rawDescData = file_plugin_proto_plugin_service_proto_rawDesc -) - -func file_plugin_proto_plugin_service_proto_rawDescGZIP() []byte { - file_plugin_proto_plugin_service_proto_rawDescOnce.Do(func() { - file_plugin_proto_plugin_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_plugin_proto_plugin_service_proto_rawDescData) - }) - return file_plugin_proto_plugin_service_proto_rawDescData -} - -var file_plugin_proto_plugin_service_proto_msgTypes = make([]protoimpl.MessageInfo, 7) -var 
file_plugin_proto_plugin_service_proto_goTypes = []interface{}{ - (*GetModeResponse)(nil), // 0: proto.GetModeResponse - (*InitRequest)(nil), // 1: proto.InitRequest - (*ProcedureRequest)(nil), // 2: proto.ProcedureRequest - (*ProcedureResponse)(nil), // 3: proto.ProcedureResponse - (*EmptyPluginResponse)(nil), // 4: proto.EmptyPluginResponse - (*GetModeResponse_Mode)(nil), // 5: proto.GetModeResponse.Mode - nil, // 6: proto.InitRequest.PluginConfigEntry -} -var file_plugin_proto_plugin_service_proto_depIdxs = []int32{ - 5, // 0: proto.GetModeResponse.modes:type_name -> proto.GetModeResponse.Mode - 6, // 1: proto.InitRequest.plugin_config:type_name -> proto.InitRequest.PluginConfigEntry - 4, // 2: proto.Plugin.GetModes:input_type -> proto.EmptyPluginResponse - 1, // 3: proto.Plugin.Init:input_type -> proto.InitRequest - 2, // 4: proto.Plugin.RunProcedure:input_type -> proto.ProcedureRequest - 0, // 5: proto.Plugin.GetModes:output_type -> proto.GetModeResponse - 4, // 6: proto.Plugin.Init:output_type -> proto.EmptyPluginResponse - 3, // 7: proto.Plugin.RunProcedure:output_type -> proto.ProcedureResponse - 5, // [5:8] is the sub-list for method output_type - 2, // [2:5] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_plugin_proto_plugin_service_proto_init() } -func file_plugin_proto_plugin_service_proto_init() { - if File_plugin_proto_plugin_service_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_plugin_proto_plugin_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetModeResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_plugin_proto_plugin_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*InitRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_plugin_proto_plugin_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProcedureRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_plugin_proto_plugin_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProcedureResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_plugin_proto_plugin_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EmptyPluginResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_plugin_proto_plugin_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetModeResponse_Mode); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_plugin_proto_plugin_service_proto_rawDesc, - NumEnums: 0, - NumMessages: 7, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_plugin_proto_plugin_service_proto_goTypes, - DependencyIndexes: file_plugin_proto_plugin_service_proto_depIdxs, - MessageInfos: file_plugin_proto_plugin_service_proto_msgTypes, - }.Build() - File_plugin_proto_plugin_service_proto = out.File - file_plugin_proto_plugin_service_proto_rawDesc = nil - file_plugin_proto_plugin_service_proto_goTypes = nil - file_plugin_proto_plugin_service_proto_depIdxs = nil -} diff --git a/plugin/proto/plugin_service.proto 
b/plugin/proto/plugin_service.proto deleted file mode 100644 index a68e80c6..00000000 --- a/plugin/proto/plugin_service.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package proto; - -option go_package = "github.com/highcard-dev/daemon/plugin/proto"; - -message GetModeResponse{ - message Mode { - bool standalone = 1; - string mode = 2; - } - repeated Mode modes = 1; -} - -message InitRequest { - map plugin_config = 1; - uint32 druid_server = 2; - string cwd = 3; - string scroll_config = 4; -} - -message ProcedureRequest { - string mode = 1; - string data = 2; -} - -message ProcedureResponse { - string data = 1; -} - -message EmptyPluginResponse {} - -service Plugin { - rpc GetModes(EmptyPluginResponse) returns (GetModeResponse); - rpc Init(InitRequest) returns (EmptyPluginResponse); - rpc RunProcedure(ProcedureRequest) returns (ProcedureResponse); -} \ No newline at end of file diff --git a/plugin/proto/plugin_service_grpc.pb.go b/plugin/proto/plugin_service_grpc.pb.go deleted file mode 100644 index 2b7ae990..00000000 --- a/plugin/proto/plugin_service_grpc.pb.go +++ /dev/null @@ -1,177 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.8 -// source: plugin/proto/plugin_service.proto - -package proto - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// PluginClient is the client API for Plugin service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
-type PluginClient interface { - GetModes(ctx context.Context, in *EmptyPluginResponse, opts ...grpc.CallOption) (*GetModeResponse, error) - Init(ctx context.Context, in *InitRequest, opts ...grpc.CallOption) (*EmptyPluginResponse, error) - RunProcedure(ctx context.Context, in *ProcedureRequest, opts ...grpc.CallOption) (*ProcedureResponse, error) -} - -type pluginClient struct { - cc grpc.ClientConnInterface -} - -func NewPluginClient(cc grpc.ClientConnInterface) PluginClient { - return &pluginClient{cc} -} - -func (c *pluginClient) GetModes(ctx context.Context, in *EmptyPluginResponse, opts ...grpc.CallOption) (*GetModeResponse, error) { - out := new(GetModeResponse) - err := c.cc.Invoke(ctx, "/proto.Plugin/GetModes", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *pluginClient) Init(ctx context.Context, in *InitRequest, opts ...grpc.CallOption) (*EmptyPluginResponse, error) { - out := new(EmptyPluginResponse) - err := c.cc.Invoke(ctx, "/proto.Plugin/Init", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *pluginClient) RunProcedure(ctx context.Context, in *ProcedureRequest, opts ...grpc.CallOption) (*ProcedureResponse, error) { - out := new(ProcedureResponse) - err := c.cc.Invoke(ctx, "/proto.Plugin/RunProcedure", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// PluginServer is the server API for Plugin service. -// All implementations must embed UnimplementedPluginServer -// for forward compatibility -type PluginServer interface { - GetModes(context.Context, *EmptyPluginResponse) (*GetModeResponse, error) - Init(context.Context, *InitRequest) (*EmptyPluginResponse, error) - RunProcedure(context.Context, *ProcedureRequest) (*ProcedureResponse, error) - mustEmbedUnimplementedPluginServer() -} - -// UnimplementedPluginServer must be embedded to have forward compatible implementations. 
-type UnimplementedPluginServer struct { -} - -func (UnimplementedPluginServer) GetModes(context.Context, *EmptyPluginResponse) (*GetModeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetModes not implemented") -} -func (UnimplementedPluginServer) Init(context.Context, *InitRequest) (*EmptyPluginResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Init not implemented") -} -func (UnimplementedPluginServer) RunProcedure(context.Context, *ProcedureRequest) (*ProcedureResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RunProcedure not implemented") -} -func (UnimplementedPluginServer) mustEmbedUnimplementedPluginServer() {} - -// UnsafePluginServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to PluginServer will -// result in compilation errors. -type UnsafePluginServer interface { - mustEmbedUnimplementedPluginServer() -} - -func RegisterPluginServer(s grpc.ServiceRegistrar, srv PluginServer) { - s.RegisterService(&Plugin_ServiceDesc, srv) -} - -func _Plugin_GetModes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(EmptyPluginResponse) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(PluginServer).GetModes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.Plugin/GetModes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(PluginServer).GetModes(ctx, req.(*EmptyPluginResponse)) - } - return interceptor(ctx, in, info, handler) -} - -func _Plugin_Init_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(InitRequest) - if err := dec(in); err != nil { - return nil, err - } - if 
interceptor == nil { - return srv.(PluginServer).Init(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.Plugin/Init", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(PluginServer).Init(ctx, req.(*InitRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Plugin_RunProcedure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ProcedureRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(PluginServer).RunProcedure(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.Plugin/RunProcedure", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(PluginServer).RunProcedure(ctx, req.(*ProcedureRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// Plugin_ServiceDesc is the grpc.ServiceDesc for Plugin service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var Plugin_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "proto.Plugin", - HandlerType: (*PluginServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetModes", - Handler: _Plugin_GetModes_Handler, - }, - { - MethodName: "Init", - Handler: _Plugin_Init_Handler, - }, - { - MethodName: "RunProcedure", - Handler: _Plugin_RunProcedure_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "plugin/proto/plugin_service.proto", -} diff --git a/plugin/rcon/rcon.go b/plugin/rcon/rcon.go deleted file mode 100644 index e2c88c74..00000000 --- a/plugin/rcon/rcon.go +++ /dev/null @@ -1,201 +0,0 @@ -package main - -import ( - "errors" - "io" - "time" - - "log" - - goplugin "github.com/hashicorp/go-plugin" - plugin "github.com/highcard-dev/daemon/plugin" - "github.com/highcard-dev/daemon/plugin/proto" - rconLib "github.com/highcard-dev/gorcon" -) - -type ScrollConfig struct { - Password string `yaml:"password"` - Host string `yaml:"host"` - Port int `yaml:"port"` - ConnectionMode string `yaml:"connectionMode"` // constant or short, short is e.g. for minecraft wich wants connections and then disconnects -} - -// Here is a real implementation of Rcon -type DruidPluginImpl struct { - conn *rconLib.Conn - environment *plugin.Environment - config map[string]string - connectionMode string - mainClient plugin.DruidDaemon -} - -func main() { - - log.Println("Starting RCON Plugin") - - rcon := &DruidPluginImpl{} - // pluginMap is the map of plugins we can dispense. 
- var pluginMap = map[string]goplugin.Plugin{ - "rcon": &plugin.DruidRpcPlugin{Impl: rcon}, - } - - log.Println("RCON Plugin started") - - goplugin.Serve(&goplugin.ServeConfig{ - HandshakeConfig: handshakeConfig, - Plugins: pluginMap, - GRPCServer: goplugin.DefaultGRPCServer, - }) -} - -func (g *DruidPluginImpl) ensureConnection(silent bool) error { - log.Println("Connecting to " + g.environment.Address) - if g.conn != nil { - return nil - } - conn, err := rconLib.Dial(g.environment.Address, g.environment.Password) - if err != nil { - if !silent { - log.Printf("Error connecting to RCON server: %s", err.Error()) - } - return err - } - - err = g.mainClient.NotifyConsole("rcon", "Connected to RCON server") - if err != nil { - log.Printf("Error notifying console: %s", err.Error()) - } - log.Println("Connected to RCON server") - g.conn = conn - return nil -} - -func (g *DruidPluginImpl) GetModes() ([]*proto.GetModeResponse_Mode, error) { - rcon := proto.GetModeResponse_Mode{Mode: "rcon", Standalone: true} - return []*proto.GetModeResponse_Mode{&rcon}, nil -} - -func (g *DruidPluginImpl) runProcedureConstant(key string, value string) (string, error) { - g.ensureConnection(false) - if g.conn == nil { - log.Println("RCON connection not established") - return "", errors.New("RCON connection not established") - - } - response, err := g.conn.Execute(value) - if err != nil { - log.Println(err.Error()) - g.conn.Close() - g.conn = nil - g.ensureConnection(false) - response, err = g.conn.Execute(value) - } - return response, err -} - -func (g *DruidPluginImpl) runProcedureShort(key string, value string) (string, error) { - - conn, err := rconLib.Dial(g.environment.Address, g.environment.Password) - if err != nil { - log.Println(err.Error()) - return "", errors.New(err.Error()) - - } - defer conn.Close() - log.Println("Connected to RCON server") - response, err := conn.Execute(value) - if err != nil { - println(err.Error()) - err = g.mainClient.NotifyConsole("rcon", "Rcon Error: 
"+err.Error()) - return "", err - } - err = g.mainClient.NotifyConsole("rcon", response) - return response, err -} -func (g *DruidPluginImpl) RunProcedure(key string, value string) (string, error) { - if g.connectionMode == "constant" { - return g.runProcedureConstant(key, value) - } else if g.connectionMode == "short" { - return g.runProcedureShort(key, value) - } else { - return "", errors.New("unknown connection mode") - } -} - -func (g *DruidPluginImpl) Init(config map[string]string, client plugin.DruidDaemon, cwd string, scrollConfigRawYaml string) error { - - scrollConfig, err := plugin.GetConfig[ScrollConfig]("rcon", []byte(scrollConfigRawYaml)) - - if err != nil { - return err - } - - host := scrollConfig.Host - port := scrollConfig.Port - password := scrollConfig.Password - - log.Printf("Initializing RCON Plugin with config: %v, cwd: %s", config, cwd) - - g.mainClient = client - g.config = config - - environment, err := plugin.NewPluginEnvironment(cwd, password, port, host) - if err != nil { - log.Printf("Error creating environment: %s", err.Error()) - return err - } - g.environment = environment - - if scrollConfig.ConnectionMode == "" { - g.connectionMode = "short" - err = g.mainClient.NotifyConsole("rcon", "Connection mode not set, defaulting to short\n") - if err != nil { - return err - } - } else { - g.connectionMode = scrollConfig.ConnectionMode - } - - log.Printf("Connection mode: %s", g.connectionMode) - - if g.connectionMode == "constant" { - go func() { - for { - if g.conn == nil { - log.Println("RCON connection not established, trying to connect") - - time.Sleep(time.Second) - - g.ensureConnection(true) - continue - } - packet, err := g.conn.Read() - if err != nil { - if err == io.EOF { - log.Println("RCON connection closed") - g.conn = nil - } - continue - } - err = g.mainClient.NotifyConsole("rcon", packet.Body()) - if err != nil { - log.Printf("Error notifying console: %s", err.Error()) - } - } - }() - } - - log.Println("RCON Plugin 
initialized") - - return nil -} - -// handshakeConfigs are used to just do a basic handshake between -// a plugin and host. If the handshake fails, a user friendly error is shown. -// This prevents users from executing bad plugins or executing a plugin -// directory. It is a UX feature, not a security feature. -var handshakeConfig = goplugin.HandshakeConfig{ - ProtocolVersion: 1, - MagicCookieKey: "DRUID_PLUGIN", - MagicCookieValue: "druid_is_the_way", -} diff --git a/plugin/rcon_web_rust/rcon_web_rust.go b/plugin/rcon_web_rust/rcon_web_rust.go deleted file mode 100644 index 6e9bb770..00000000 --- a/plugin/rcon_web_rust/rcon_web_rust.go +++ /dev/null @@ -1,182 +0,0 @@ -package main - -import ( - "encoding/json" - "errors" - "fmt" - "log" - "math/rand" - "net/url" - "time" - - "github.com/hashicorp/go-plugin" - "github.com/highcard-dev/daemon/internal/utils/logger" - plugins "github.com/highcard-dev/daemon/plugin" - "github.com/highcard-dev/daemon/plugin/proto" - - "github.com/gorilla/websocket" -) - -type ScrollConfig struct { - Password string `yaml:"password"` - Host string `yaml:"host"` - Port int `yaml:"port"` -} - -type Message struct { - Identifier int32 - Message string - Name string -} - -type Response struct { - Identifier int32 - Message string - Type string - Stacktrace string -} - -type DruidPluginImpl struct { - conn *websocket.Conn - config map[string]string - environment *plugins.Environment - mainClient plugins.DruidDaemon - procedures map[int32]chan *Response -} - -func (g *DruidPluginImpl) ensureConnection() error { - if g.conn != nil { - return nil - } - u := url.URL{Scheme: "ws", Host: g.environment.Address, Path: "/" + g.environment.Password} - log.Println("Connecting to " + u.String()) - - c, _, err := websocket.DefaultDialer.Dial(u.String(), nil) - - if err != nil { - logger.Log().Error(err.Error()) - return err - } - g.mainClient.NotifyConsole("rcon_web_rust", "Connected to WebRCON") - log.Println("Connected to " + u.String()) - g.conn = c - 
return nil -} -func (g *DruidPluginImpl) GetModes() ([]*proto.GetModeResponse_Mode, error) { - rcon := proto.GetModeResponse_Mode{Mode: "rcon_web_rust", Standalone: true} - return []*proto.GetModeResponse_Mode{&rcon}, nil -} - -func (g *DruidPluginImpl) RunProcedure(key string, value string) (string, error) { - randId := int32(rand.Int()) - err := g.ensureConnection() - if err != nil { - logger.Log().Error(fmt.Sprintf("RCON Web connection not established: %s", err.Error())) - return "", errors.New("RCON Web connection not established") - - } - g.procedures[randId] = make(chan *Response) - m := Message{ - Identifier: randId, - Message: value, - Name: "WebRcon", - } - g.conn.WriteJSON(m) - var message *Response -loop: - for timeout := time.After(5 * time.Second); ; { - select { - case <-timeout: - return "", errors.New("execute timeout") - case m := <-g.procedures[randId]: - if m.Identifier == randId { - message = m - break loop - } - } - } - - delete(g.procedures, randId) - return message.Message, err -} - -func (g *DruidPluginImpl) Init(config map[string]string, client plugins.DruidDaemon, cwd string, scrollConfigRawYaml string) error { - - log.Println(scrollConfigRawYaml) - - scrollConfig, err := plugins.GetConfig[ScrollConfig]("rcon_web_rust", []byte(scrollConfigRawYaml)) - - g.mainClient = client - g.config = config - g.procedures = make(map[int32]chan *Response) - - host := scrollConfig.Host - port := scrollConfig.Port - password := scrollConfig.Password - - environment, err := plugins.NewPluginEnvironment(cwd, password, port, host) - if err != nil { - return err - } - g.environment = environment - g.ensureConnection() - go func() { - for { - if g.conn == nil { - log.Println("Trying to reconnect to Web RCON server") - g.ensureConnection() - time.Sleep(time.Second) - - continue - } - _, m, err := g.conn.ReadMessage() - if err != nil { - log.Println("Web RCON connection closed") - g.conn.Close() - g.conn = nil - continue - } - 
g.mainClient.NotifyConsole("rcon_web_rust", string(m)) - var r Response - err = json.Unmarshal([]byte(m), &r) - if err != nil { - continue - } - - if ch, ok := g.procedures[r.Identifier]; ok { - go func() { - ch <- &r - }() - } - } - }() - - log.Println("Web RCON Plugin initialized") - - return nil -} - -// handshakeConfigs are used to just do a basic handshake between -// a plugin and host. If the handshake fails, a user friendly error is shown. -// This prevents users from executing bad plugins or executing a plugin -// directory. It is a UX feature, not a security feature. -var handshakeConfig = plugin.HandshakeConfig{ - ProtocolVersion: 1, - MagicCookieKey: "DRUID_PLUGIN", - MagicCookieValue: "druid_is_the_way", -} - -func main() { - rcon := &DruidPluginImpl{} - // pluginMap is the map of plugins we can dispense. - var pluginMap = map[string]plugin.Plugin{ - "rcon_web_rust": &plugins.DruidRpcPlugin{Impl: rcon}, - } - - log.Println("RCON Web Plugin started") - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: handshakeConfig, - Plugins: pluginMap, - GRPCServer: plugin.DefaultGRPCServer, - }) -} diff --git a/plugin/runDebug.sh b/plugin/runDebug.sh deleted file mode 100644 index 6e17ecfe..00000000 --- a/plugin/runDebug.sh +++ /dev/null @@ -1,14 +0,0 @@ -#echo "Running plugin in debug mode $1 $2 $3 $4 $5 $6 $7 $8 $9" - -#set magicCookie=magicValue environemnt variables from HandshakeConfig -#export TEST_PLUGIN=cookie_value -#set plugin vars -export PLUGIN_MIN_PORT=10000 -export PLUGIN_MAX_PORT=25000 -export PLUGIN_PROTOCOL_VERSIONS=1 - -#make sure plugin output is "original" without debugger messages by passing log-dest & tty arguments -dlv --listen=:40000 --headless=true --api-version=2 --accept-multiclient \ - --log-dest "dlv.log" \ - --tty="" \ - exec $3 -- "$@" \ No newline at end of file diff --git a/scripts/build_coldstarter_image.sh b/scripts/build_coldstarter_image.sh new file mode 100755 index 00000000..b1d23e4f --- /dev/null +++ 
b/scripts/build_coldstarter_image.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -euo pipefail + +IMAGE="${IMAGE:-druid-coldstarter:local}" +VERSION="${VERSION:-local}" +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" + +echo "Building local coldstarter image: ${IMAGE}" +docker build \ + --file "${ROOT_DIR}/Dockerfile.coldstarter" \ + --build-arg "VERSION=${VERSION}" \ + --tag "${IMAGE}" \ + "${ROOT_DIR}" + +echo "Built ${IMAGE}" diff --git a/scripts/validate_all_scrolls.sh b/scripts/validate_all_scrolls.sh index f59e9080..54f808b1 100755 --- a/scripts/validate_all_scrolls.sh +++ b/scripts/validate_all_scrolls.sh @@ -2,9 +2,8 @@ set -e -ALL_SCROLL_DIRS=$(find . -type f -name "scroll.yaml" -exec dirname {} \; | sort | uniq) - -for SCROLL_DIR in $ALL_SCROLL_DIRS; do +for SCROLL_FILE in examples/*/scroll.yaml; do + SCROLL_DIR=$(dirname "$SCROLL_FILE") echo "Validating $SCROLL_DIR" - go run main.go scroll validate $SCROLL_DIR -done \ No newline at end of file + go run ./apps/druid validate --strict "$SCROLL_DIR" +done diff --git a/test/integration/commands/serve_coldstarter_test.go b/test/integration/commands/serve_coldstarter_test.go deleted file mode 100644 index 8428003d..00000000 --- a/test/integration/commands/serve_coldstarter_test.go +++ /dev/null @@ -1,181 +0,0 @@ -//go:build integration - -package command_test - -import ( - "context" - "errors" - "os" - "testing" - "time" - - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/utils/logger" - test_utils "github.com/highcard-dev/daemon/test/utils" -) - -var genericHandler = "generic" -var testHandler = "test.lua" - -var luaHandlerContent = ` -function handle(ctx, data) - if data == "test" then - sendData("testback") - finish() - end -end -` - -var testCommand = map[string]*domain.CommandInstructionSet{ - "start": { - Procedures: []*domain.Procedure{ - { - Mode: "exec", - Data: []string{"touch", "test.txt"}, - }, - }, - }, -} - -func 
TestColdstarterServeCommand(t *testing.T) { - - type TestCase struct { - Name string - Scroll domain.File - ExecColdStarterFn func(string, int) error - LuaHandlerContent string - } - var testCases = []TestCase{ - - { - Name: "TestServeColdstarterEmtpty", - Scroll: domain.File{ - Ports: []domain.Port{}, - Serve: "start", - Commands: testCommand, - }, - }, - /*{ - Name: "TestServeColdstarterWithoutHandler", - Scroll: domain.File{ - Ports: []domain.Port{ - { - Port: 12350, - Name: "testport", - Protocol: "tcp", - }, - }, - Serve: "start", - Commands: testCommand, - }, - ExecColdStarterFn: test_utils.NoTcpTester, - }, - { - Name: "TestServeColdstarterWithoutHandler2", - Scroll: domain.File{ - Ports: []domain.Port{ - { - Port: 12350, - Name: "testport", - Protocol: "tcp", - }, - { - Port: 12351, - Name: "testport2", - Protocol: "tcp", - }, - }, - Serve: "start", - Commands: testCommand, - }, - ExecColdStarterFn: test_utils.NoTcpTester, - },*/{ - Name: "TestServeColdstarterWithGenericTCPHandler", - Scroll: domain.File{ - Ports: []domain.Port{ - { - Port: 12352, - Name: "testport", - Protocol: "tcp", - SleepHandler: &genericHandler, - }, - }, - Serve: "start", - Commands: testCommand, - }, - ExecColdStarterFn: test_utils.TcpTester, - }, { - Name: "TestServeColdstarterWithTestLuaTCPHandler", - Scroll: domain.File{ - Ports: []domain.Port{ - { - Port: 12353, - Name: "testport", - Protocol: "tcp", - SleepHandler: &testHandler, - }, - }, - Serve: "start", - Commands: testCommand, - }, - LuaHandlerContent: luaHandlerContent, - ExecColdStarterFn: test_utils.TcpTester, - }, - { - Name: "TestServeColdstarterWithGenericUDPHandler", - Scroll: domain.File{ - Ports: []domain.Port{ - { - Port: 12354, - Name: "testport", - Protocol: "udp", - SleepHandler: &genericHandler, - }, - }, - Serve: "start", - Commands: testCommand, - }, - ExecColdStarterFn: test_utils.UdpTester, - }, - } - for _, tc := range testCases { - t.Run(tc.Name, func(t *testing.T) { - 
logger.Log(logger.WithStructuredLogging()) - println(tc.Name) - scrollPath, path := test_utils.SetupScroll(t, tc.Scroll) - defer os.RemoveAll(path) - - if tc.LuaHandlerContent != "" { - err := os.WriteFile(scrollPath+testHandler, []byte(tc.LuaHandlerContent), 0644) - if err != nil { - t.Fatalf("Failed to write test lua handler file: %v", err) - } - } - ctx, cancel := context.WithCancelCause(context.WithValue(context.Background(), "disablePrometheus", true)) - - defer cancel(errors.New("test ended")) - - test_utils.SetupServeCmd(ctx, t, path, []string{"--coldstarter"}) - - if tc.ExecColdStarterFn != nil { - //wait for server to start, maybe we can do this better, but we cannot do a tcp dial or somthing like that - time.Sleep(1 * time.Second) - var err error - if tc.LuaHandlerContent != "" { - err = tc.ExecColdStarterFn("testback", tc.Scroll.Ports[0].Port) - } else { - err = tc.ExecColdStarterFn("", tc.Scroll.Ports[0].Port) - } - if err != nil { - t.Fatalf("Failed to execute coldstarter function: %v", err) - } - } - - err := test_utils.WaitUntilFileExists(path+"data/test.txt", 15*time.Second) - if err != nil { - t.Fatalf("Failed to wait for test.txt to be created: %v", err) - } - }) - - } -} diff --git a/test/integration/commands/serve_idle_test.go b/test/integration/commands/serve_idle_test.go deleted file mode 100644 index a694efae..00000000 --- a/test/integration/commands/serve_idle_test.go +++ /dev/null @@ -1,89 +0,0 @@ -//go:build integration - -package command_test - -import ( - "bytes" - "context" - "errors" - "os" - "strconv" - "testing" - "time" - - "github.com/highcard-dev/daemon/cmd" - "github.com/highcard-dev/daemon/internal/utils/logger" -) - -func TestServeIdleCommand(t *testing.T) { - - type TestCase struct { - Name string - Args []string - ExpectedErr error - } - - var testCases = []TestCase{ - { - Name: "TestServeNoArtifact", - Args: []string{"serve"}, - ExpectedErr: errors.New("no artifact provided"), - }, - - { - Name: "TestServeNoArtifactTag", - 
Args: []string{"serve", "invalidscrollwithouttag"}, - ExpectedErr: errors.New("reference (tag or digest) must be set"), - }, - { - Name: "TestServeNoValidArtifact", - Args: []string{"serve", "invalidscroll:withtag"}, - ExpectedErr: errors.New("invalid reference: missing registry or repository"), - }, - } - for _, tc := range testCases { - t.Run(tc.Name, func(t *testing.T) { - //return - //observer := logger.SetupLogsCapture() - - logger.Log(logger.WithStructuredLogging()) - - unixTime := time.Now().Unix() - path := "./druid-cli-test/" + strconv.FormatInt(unixTime, 10) + "/" - - if err := os.MkdirAll(path, 0755); err != nil { - t.Fatalf("Failed to create test cwd: %v", err) - } - defer os.RemoveAll(path) - - b := bytes.NewBufferString("") - - rootCmd := cmd.RootCmd - rootCmd.SetErr(b) - rootCmd.SetOut(b) - rootCmd.SetArgs(append([]string{"--cwd", path}, tc.Args...)) - - ctx := context.WithValue(context.Background(), "disablePrometheus", true) - - serveCmd, _, err := rootCmd.Find([]string{"serve"}) - if err != nil { - t.Fatalf("Failed to find serve command: %v", err) - } - serveCmd.SetContext(ctx) - - err = rootCmd.ExecuteContext(ctx) - - if err != nil { - if tc.ExpectedErr == nil { - t.Fatalf("Unexpected error: %v", err) - } else { - if err.Error() != tc.ExpectedErr.Error() { - t.Fatalf("Expected error: %v, got: %v", tc.ExpectedErr, err) - } - } - } - - }) - - } -} diff --git a/test/integration/commands/serve_test.go b/test/integration/commands/serve_test.go deleted file mode 100644 index 0cef03dd..00000000 --- a/test/integration/commands/serve_test.go +++ /dev/null @@ -1,201 +0,0 @@ -//go:build integration - -package command_test - -import ( - "bytes" - "context" - "os" - "strconv" - "testing" - "time" - - "github.com/highcard-dev/daemon/cmd" - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/utils/logger" - test_utils "github.com/highcard-dev/daemon/test/utils" - "github.com/otiai10/copy" - "gopkg.in/yaml.v2" -) - 
-func TestServeCommand(t *testing.T) { - - type TestCase struct { - Name string - ScrollFile string - Restarts int - RunModeOverwrite domain.RunMode - } - var testCases = []TestCase{ - { - Name: "TestServeFull", - ScrollFile: "../../../examples/minecraft/scroll.yaml", - Restarts: 0, - }, - { - Name: "TestServeFull With Restart", - ScrollFile: "../../../examples/minecraft/scroll.yaml", - Restarts: 1, - }, - { - Name: "TestServeFull With 3 Restarts", - ScrollFile: "../../../examples/minecraft/scroll.yaml", - Restarts: 3, - }, - { - Name: "TestServeFull With Restart (Persistent)", - ScrollFile: "../../../examples/minecraft/scroll.yaml", - Restarts: 1, - RunModeOverwrite: domain.RunModePersistent, - }, - } - for _, tc := range testCases { - t.Run(tc.Name, func(t *testing.T) { - logger.Log(logger.WithStructuredLogging()) - - time.Sleep(10 * time.Second) - - //observer := logger.SetupLogsCapture() - unixTime := time.Now().Unix() - path := "./druid-cli-test/" + strconv.FormatInt(unixTime, 10) + "/" - - err := copy.Copy(tc.ScrollFile, path+"scroll.yaml") - if err != nil { - t.Fatalf("Failed to copy test scroll file: %v", err) - } - - if tc.RunModeOverwrite != "" { - //overwrite "restart" with RunModeOverwrite - scroll, err := domain.NewScroll(path) - if err != nil { - t.Fatalf("Failed to read scroll file: %v", err) - } - for i, command := range scroll.File.Commands { - if command.Run == domain.RunModeRestart { - scroll.File.Commands[i].Run = domain.RunMode(tc.RunModeOverwrite) - } - } - scrollBytes, err := yaml.Marshal(scroll.File) - if err != nil { - t.Fatalf("Failed to marshal scroll file: %v", err) - } - err = os.WriteFile(path+"scroll.yaml", scrollBytes, 0644) - if err != nil { - t.Fatalf("Failed to write scroll file: %v", err) - } - } - - if err := os.MkdirAll(path, 0755); err != nil { - t.Fatalf("Failed to create test cwd: %v", err) - } - defer os.RemoveAll(path) - - runs := tc.Restarts + 1 - - var installDate int64 - - for i := 0; i < runs; i++ { - var connected 
bool - - b := bytes.NewBufferString("") - - rootCmd := cmd.RootCmd - rootCmd.SetErr(b) - rootCmd.SetOut(b) - rootCmd.SetArgs([]string{"--cwd", path, "serve", "--coldstarter=false"}) - - ctx, cancel := context.WithCancel(context.WithValue(context.Background(), "disablePrometheus", true)) - - defer cancel() - - logger.Log().Info("Starting serve command") - - connected, err = test_utils.StartAndTestServeCommand(ctx, t, rootCmd) - - if !connected { - t.Fatalf("Failed to connect to daemon web server: %v", err) - } - - err = test_utils.WaitForConsoleRunning("start.0", 180*time.Second) - if err != nil { - t.Fatalf("Failed to start console: %v", err) - } - - wsClient, err := test_utils.WaitForWebsocketConnection("localhost:8081", "/ws/v1/serve/start-process", 60*time.Second) - if err != nil { - t.Fatalf("Failed to connect to ws server: %v", err) - } - - err = test_utils.WaitForWebsocketMessage(wsClient, `For help, type "help"`, 60*time.Second) - t.Log("Console message received") - if err != nil { - t.Fatalf("Failed to get help message: %v", err) - } - - err = test_utils.ConnectionTest("localhost:25565", true) - - if err != nil { - t.Fatalf("Failed to connect to minecraft server: %v", err) - } - - t.Log("Connected to minecraft server") - - //double check that install was never run again - lock, err := domain.ReadLock(path + "scroll-lock.json") - if err != nil { - t.Fatalf("Failed to read lock file: %v", err) - } - t.Log("Read lock file") - - if installDate == 0 { - installDate = lock.GetStatus("install").LastStatusChange - - if installDate == 0 { - t.Fatalf("Failed to get install date") - } - } else { - if installDate != lock.GetStatus("install").LastStatusChange { - t.Fatalf("Install command was run again") - } - } - - go func() { - <-ctx.Done() - }() - - t.Log("Stopping daemon server") - - cancel() - - err = test_utils.CheckHttpServerShutdown(8081, 120*time.Second) - if err != nil { - t.Fatalf("Failed to stop daemon server, server still online") - } - - lock, err = 
domain.ReadLock(path + "scroll-lock.json") - if err != nil { - t.Fatalf("Failed to read lock file: %v", err) - } - - expectedStatuses := map[string]domain.ScrollLockStatus{ - "install": "done", - "start": "waiting", - } - if tc.RunModeOverwrite == domain.RunModePersistent { - expectedStatuses["start"] = "done" - } - - for command, status := range expectedStatuses { - s := lock.GetStatus(command) - if s.Status != status { - t.Fatalf("Lock file status %s not found, expected: %v, got: %v", status, expectedStatuses, lock.Statuses) - } - } - - t.Log("Stopped daemon server, lock file status looks good") - - } - }) - - } -} diff --git a/test/integration/commands/serve_watch_ports_test.go b/test/integration/commands/serve_watch_ports_test.go deleted file mode 100644 index 3290e5bc..00000000 --- a/test/integration/commands/serve_watch_ports_test.go +++ /dev/null @@ -1,108 +0,0 @@ -//go:build integration - -package command_test - -import ( - "context" - "errors" - "fmt" - "os" - "runtime" - "testing" - "time" - - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/utils/logger" - test_utils "github.com/highcard-dev/daemon/test/utils" -) - -var testCommandTCP = func() map[string]*domain.CommandInstructionSet { - var ncCommand = []string{"nc", "-l", "-p", "12349"} - if runtime.GOOS == "darwin" { - ncCommand = []string{"nc", "-l", "12349"} - } - return map[string]*domain.CommandInstructionSet{ - "start": { - Procedures: []*domain.Procedure{ - { - Mode: "exec", - Data: ncCommand, - }, - }, - }, - } -} - -func TestWatchPortsServeCommand(t *testing.T) { - - type TestCase struct { - Name string - Scroll domain.File - } - var testCases = []TestCase{ - { - Name: "TestServeWaitPortsCommandTCP", - Scroll: domain.File{ - Ports: []domain.Port{ - { - Port: 12349, - Name: "testport", - Protocol: "tcp", - CheckActivity: true, - }, - }, - Serve: "start", - Commands: testCommandTCP(), - }, - }, - } - for _, tc := range testCases { - t.Run(tc.Name, 
func(t *testing.T) { - logger.Log(logger.WithStructuredLogging()) - - _, path := test_utils.SetupScroll(t, tc.Scroll) - defer os.RemoveAll(path) - - ctx, cancel := context.WithCancelCause(context.WithValue(context.Background(), "disablePrometheus", true)) - defer cancel(errors.New("test ended")) - - test_utils.SetupServeCmd(ctx, t, path, []string{"--coldstarter=false", "--watch-ports"}) - //give time to make sure everything is online - time.Sleep(1 * time.Second) - ap1, err := test_utils.FetchPorts() - if err != nil { - t.Fatalf("Failed to fetch ports: %v", err) - } - - fmt.Printf("Ports: %v\n", ap1) - - for _, p := range ap1 { - if !p.Open { - t.Fatalf("Port %d is not open", p.Port.Port) - } - } - //give time to to get picked up by the watcher - time.Sleep(1 * time.Second) - - err = test_utils.TcpTester("", 12349) - if err != nil { - t.Fatalf("Failed to test tcp: %v", err) - } - - //give time to to get picked up by the watcher - time.Sleep(1 * time.Second) - - ap2, err := test_utils.FetchPorts() - if err != nil { - t.Fatalf("Failed to fetch ports: %v", err) - } - - for idx, p := range ap2 { - if p.InactiveSince == ap1[idx].InactiveSince { - t.Fatalf("InactiveSince did not change for port %d (both: %s)", p.Port.Port, p.InactiveSince) - } - } - - }) - } -} diff --git a/test/integration/example_test.go b/test/integration/example_test.go index 29d22972..883181b3 100644 --- a/test/integration/example_test.go +++ b/test/integration/example_test.go @@ -18,26 +18,20 @@ import ( ) type ServiceConfig struct { - ServiceName string - ExamplePath string - TestAddress string - TestName string - LockFileStatus []string - UseLogSpy bool - LogSpy func(string, []byte) bool + ServiceName string + ExamplePath string + TestAddress string + TestName string + CommandStatus []string + UseLogSpy bool + LogSpy func(string, []byte) bool } -func checkLockFile(scrollService *services.ScrollService, config ServiceConfig) error { - - lock, err := scrollService.GetLock() - - if err != nil { - 
return err - } - - for _, status := range config.LockFileStatus { - if _, ok := lock.Statuses[status]; !ok { - return fmt.Errorf("Lock file status %s not found, expected: %v, got: %v", status, config.LockFileStatus, lock.Statuses) +func checkQueue(queueManager *services.QueueManager, config ServiceConfig) error { + queue := queueManager.GetQueue() + for _, status := range config.CommandStatus { + if _, ok := queue[status]; !ok { + return fmt.Errorf("command status %s not found, expected: %v, got: %v", status, config.CommandStatus, queue) } } return nil @@ -47,23 +41,23 @@ func TestExamples(t *testing.T) { configs := []ServiceConfig{ { - ServiceName: "minecraft", - ExamplePath: "../../examples/minecraft/scroll.yaml", - TestAddress: "localhost:25565", - TestName: "Minecraft", - LockFileStatus: []string{"start", "install"}, - UseLogSpy: true, + ServiceName: "minecraft", + ExamplePath: "../../examples/minecraft/scroll.yaml", + TestAddress: "localhost:25565", + TestName: "Minecraft", + CommandStatus: []string{"start", "install"}, + UseLogSpy: true, LogSpy: func(stream string, sc []byte) bool { println(string(sc)) return strings.Contains(string(sc), `For help, type "help"`) }, }, { - ServiceName: "nginx", - ExamplePath: "../../examples/nginx/scroll.yaml", - TestAddress: "localhost:80", - TestName: "Nginx", - LockFileStatus: []string{"start"}, + ServiceName: "nginx", + ExamplePath: "../../examples/nginx/scroll.yaml", + TestAddress: "localhost:80", + TestName: "Nginx", + CommandStatus: []string{"start"}, }, // Add more services here } @@ -73,11 +67,6 @@ func TestExamples(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() logManager := mock_ports.NewMockLogManagerInterface(ctrl) - ociRegistryMock := mock_ports.NewMockOciRegistryInterface(ctrl) - pluginManager := mock_ports.NewMockPluginManagerInterface(ctrl) - - pluginManager.EXPECT().HasMode(gomock.Any()).Return(false).AnyTimes() - logDoneChan := make(chan struct{}, 1) 
logManager.EXPECT().AddLine(gomock.Any(), gomock.Any()).DoAndReturn(func(stream string, sc []byte) { @@ -114,10 +103,10 @@ func TestExamples(t *testing.T) { t.Error(err) return } - consoleManager := services.NewConsoleManager(logManager) - processMonitor := test_utils.GetMockedProcessMonitor(ctrl) - processManager := services.NewProcessManager(logManager, consoleManager, processMonitor) - procedureLauncher, err := services.NewProcedureLauncher(ociRegistryMock, processManager, pluginManager, consoleManager, logManager, scrollService, "external") + runtimeBackend := mock_ports.NewMockRuntimeBackendInterface(ctrl) + exitCode := 0 + runtimeBackend.EXPECT().RunCommand(gomock.Any()).Return(&exitCode, nil).AnyTimes() + procedureLauncher, err := services.NewProcedureLauncher(scrollService, runtimeBackend, "/tmp") if err != nil { t.Error(err) return @@ -126,9 +115,6 @@ func TestExamples(t *testing.T) { go queueManager.Work() - scrollService.WriteNewScrollLock() - scrollService.ReloadLock(false) - err = queueManager.AddAndRememberItem("start") if err != nil { @@ -152,7 +138,7 @@ func TestExamples(t *testing.T) { t.Error("Failed to test to server: ", err) } - err = checkLockFile(scrollService, config) + err = checkQueue(queueManager, config) if err != nil { t.Error(err) return @@ -172,7 +158,7 @@ func TestExamples(t *testing.T) { } } - err = checkLockFile(scrollService, config) + err = checkQueue(queueManager, config) if err != nil { t.Error(err) return diff --git a/test/mock/services.go b/test/mock/services.go index 67716a0d..823fbfd0 100644 --- a/test/mock/services.go +++ b/test/mock/services.go @@ -114,18 +114,6 @@ func (m *MockScrollServiceInterface) EXPECT() *MockScrollServiceInterfaceMockRec return m.recorder } -// AddTemporaryCommand mocks base method. 
-func (m *MockScrollServiceInterface) AddTemporaryCommand(cmd string, instructions *domain.CommandInstructionSet) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddTemporaryCommand", cmd, instructions) -} - -// AddTemporaryCommand indicates an expected call of AddTemporaryCommand. -func (mr *MockScrollServiceInterfaceMockRecorder) AddTemporaryCommand(cmd, instructions any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTemporaryCommand", reflect.TypeOf((*MockScrollServiceInterface)(nil).AddTemporaryCommand), cmd, instructions) -} - // GetCommand mocks base method. func (m *MockScrollServiceInterface) GetCommand(cmd string) (*domain.CommandInstructionSet, error) { m.ctrl.T.Helper() @@ -197,49 +185,6 @@ func (mr *MockScrollServiceInterfaceMockRecorder) GetFile() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFile", reflect.TypeOf((*MockScrollServiceInterface)(nil).GetFile)) } -// GetLock mocks base method. -func (m *MockScrollServiceInterface) GetLock() (*domain.ScrollLock, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetLock") - ret0, _ := ret[0].(*domain.ScrollLock) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetLock indicates an expected call of GetLock. -func (mr *MockScrollServiceInterfaceMockRecorder) GetLock() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLock", reflect.TypeOf((*MockScrollServiceInterface)(nil).GetLock)) -} - -// GetScrollConfigRawYaml mocks base method. -func (m *MockScrollServiceInterface) GetScrollConfigRawYaml() []byte { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetScrollConfigRawYaml") - ret0, _ := ret[0].([]byte) - return ret0 -} - -// GetScrollConfigRawYaml indicates an expected call of GetScrollConfigRawYaml. 
-func (mr *MockScrollServiceInterfaceMockRecorder) GetScrollConfigRawYaml() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetScrollConfigRawYaml", reflect.TypeOf((*MockScrollServiceInterface)(nil).GetScrollConfigRawYaml)) -} - -// WriteNewScrollLock mocks base method. -func (m *MockScrollServiceInterface) WriteNewScrollLock() *domain.ScrollLock { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteNewScrollLock") - ret0, _ := ret[0].(*domain.ScrollLock) - return ret0 -} - -// WriteNewScrollLock indicates an expected call of WriteNewScrollLock. -func (mr *MockScrollServiceInterfaceMockRecorder) WriteNewScrollLock() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteNewScrollLock", reflect.TypeOf((*MockScrollServiceInterface)(nil).WriteNewScrollLock)) -} - // MockProcedureLauchnerInterface is a mock of ProcedureLauchnerInterface interface. type MockProcedureLauchnerInterface struct { ctrl *gomock.Controller @@ -278,143 +223,18 @@ func (mr *MockProcedureLauchnerInterfaceMockRecorder) GetProcedureStatuses() *go return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProcedureStatuses", reflect.TypeOf((*MockProcedureLauchnerInterface)(nil).GetProcedureStatuses)) } -// LaunchPlugins mocks base method. -func (m *MockProcedureLauchnerInterface) LaunchPlugins() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LaunchPlugins") - ret0, _ := ret[0].(error) - return ret0 -} - -// LaunchPlugins indicates an expected call of LaunchPlugins. -func (mr *MockProcedureLauchnerInterfaceMockRecorder) LaunchPlugins() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LaunchPlugins", reflect.TypeOf((*MockProcedureLauchnerInterface)(nil).LaunchPlugins)) -} - // Run mocks base method. 
-func (m *MockProcedureLauchnerInterface) Run(cmd string, runCommandCb func(string) error) error { +func (m *MockProcedureLauchnerInterface) Run(cmd string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Run", cmd, runCommandCb) + ret := m.ctrl.Call(m, "Run", cmd) ret0, _ := ret[0].(error) return ret0 } // Run indicates an expected call of Run. -func (mr *MockProcedureLauchnerInterfaceMockRecorder) Run(cmd, runCommandCb any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockProcedureLauchnerInterface)(nil).Run), cmd, runCommandCb) -} - -// RunProcedure mocks base method. -func (m *MockProcedureLauchnerInterface) RunProcedure(arg0 *domain.Procedure, arg1 string, arg2 []string) (string, *int, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RunProcedure", arg0, arg1, arg2) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(*int) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// RunProcedure indicates an expected call of RunProcedure. -func (mr *MockProcedureLauchnerInterfaceMockRecorder) RunProcedure(arg0, arg1, arg2 any) *gomock.Call { +func (mr *MockProcedureLauchnerInterfaceMockRecorder) Run(cmd any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunProcedure", reflect.TypeOf((*MockProcedureLauchnerInterface)(nil).RunProcedure), arg0, arg1, arg2) -} - -// MockPluginManagerInterface is a mock of PluginManagerInterface interface. -type MockPluginManagerInterface struct { - ctrl *gomock.Controller - recorder *MockPluginManagerInterfaceMockRecorder - isgomock struct{} -} - -// MockPluginManagerInterfaceMockRecorder is the mock recorder for MockPluginManagerInterface. -type MockPluginManagerInterfaceMockRecorder struct { - mock *MockPluginManagerInterface -} - -// NewMockPluginManagerInterface creates a new mock instance. 
-func NewMockPluginManagerInterface(ctrl *gomock.Controller) *MockPluginManagerInterface { - mock := &MockPluginManagerInterface{ctrl: ctrl} - mock.recorder = &MockPluginManagerInterfaceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockPluginManagerInterface) EXPECT() *MockPluginManagerInterfaceMockRecorder { - return m.recorder -} - -// CanRunStandaloneProcedure mocks base method. -func (m *MockPluginManagerInterface) CanRunStandaloneProcedure(mode string) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CanRunStandaloneProcedure", mode) - ret0, _ := ret[0].(bool) - return ret0 -} - -// CanRunStandaloneProcedure indicates an expected call of CanRunStandaloneProcedure. -func (mr *MockPluginManagerInterfaceMockRecorder) CanRunStandaloneProcedure(mode any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CanRunStandaloneProcedure", reflect.TypeOf((*MockPluginManagerInterface)(nil).CanRunStandaloneProcedure), mode) -} - -// GetNotifyConsoleChannel mocks base method. -func (m *MockPluginManagerInterface) GetNotifyConsoleChannel() chan *domain.StreamItem { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNotifyConsoleChannel") - ret0, _ := ret[0].(chan *domain.StreamItem) - return ret0 -} - -// GetNotifyConsoleChannel indicates an expected call of GetNotifyConsoleChannel. -func (mr *MockPluginManagerInterfaceMockRecorder) GetNotifyConsoleChannel() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNotifyConsoleChannel", reflect.TypeOf((*MockPluginManagerInterface)(nil).GetNotifyConsoleChannel)) -} - -// HasMode mocks base method. -func (m *MockPluginManagerInterface) HasMode(mode string) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HasMode", mode) - ret0, _ := ret[0].(bool) - return ret0 -} - -// HasMode indicates an expected call of HasMode. 
-func (mr *MockPluginManagerInterfaceMockRecorder) HasMode(mode any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasMode", reflect.TypeOf((*MockPluginManagerInterface)(nil).HasMode), mode) -} - -// ParseFromScroll mocks base method. -func (m *MockPluginManagerInterface) ParseFromScroll(pluginDefinitionMap map[string]map[string]string, config, cwd string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ParseFromScroll", pluginDefinitionMap, config, cwd) - ret0, _ := ret[0].(error) - return ret0 -} - -// ParseFromScroll indicates an expected call of ParseFromScroll. -func (mr *MockPluginManagerInterfaceMockRecorder) ParseFromScroll(pluginDefinitionMap, config, cwd any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParseFromScroll", reflect.TypeOf((*MockPluginManagerInterface)(nil).ParseFromScroll), pluginDefinitionMap, config, cwd) -} - -// RunProcedure mocks base method. -func (m *MockPluginManagerInterface) RunProcedure(mode, value string) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RunProcedure", mode, value) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RunProcedure indicates an expected call of RunProcedure. -func (mr *MockPluginManagerInterfaceMockRecorder) RunProcedure(mode, value any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunProcedure", reflect.TypeOf((*MockPluginManagerInterface)(nil).RunProcedure), mode, value) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockProcedureLauchnerInterface)(nil).Run), cmd) } // MockLogManagerInterface is a mock of LogManagerInterface interface. 
@@ -467,100 +287,154 @@ func (mr *MockLogManagerInterfaceMockRecorder) GetStreams() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStreams", reflect.TypeOf((*MockLogManagerInterface)(nil).GetStreams)) } -// MockProcessManagerInterface is a mock of ProcessManagerInterface interface. -type MockProcessManagerInterface struct { +// MockRuntimeBackendInterface is a mock of RuntimeBackendInterface interface. +type MockRuntimeBackendInterface struct { ctrl *gomock.Controller - recorder *MockProcessManagerInterfaceMockRecorder + recorder *MockRuntimeBackendInterfaceMockRecorder isgomock struct{} } -// MockProcessManagerInterfaceMockRecorder is the mock recorder for MockProcessManagerInterface. -type MockProcessManagerInterfaceMockRecorder struct { - mock *MockProcessManagerInterface +// MockRuntimeBackendInterfaceMockRecorder is the mock recorder for MockRuntimeBackendInterface. +type MockRuntimeBackendInterfaceMockRecorder struct { + mock *MockRuntimeBackendInterface } -// NewMockProcessManagerInterface creates a new mock instance. -func NewMockProcessManagerInterface(ctrl *gomock.Controller) *MockProcessManagerInterface { - mock := &MockProcessManagerInterface{ctrl: ctrl} - mock.recorder = &MockProcessManagerInterfaceMockRecorder{mock} +// NewMockRuntimeBackendInterface creates a new mock instance. +func NewMockRuntimeBackendInterface(ctrl *gomock.Controller) *MockRuntimeBackendInterface { + mock := &MockRuntimeBackendInterface{ctrl: ctrl} + mock.recorder = &MockRuntimeBackendInterfaceMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockProcessManagerInterface) EXPECT() *MockProcessManagerInterfaceMockRecorder { +func (m *MockRuntimeBackendInterface) EXPECT() *MockRuntimeBackendInterfaceMockRecorder { return m.recorder } -// GetRunningProcess mocks base method. 
-func (m *MockProcessManagerInterface) GetRunningProcess(commandName string) *domain.Process { +// Attach mocks base method. +func (m *MockRuntimeBackendInterface) Attach(commandName, data string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRunningProcess", commandName) - ret0, _ := ret[0].(*domain.Process) + ret := m.ctrl.Call(m, "Attach", commandName, data) + ret0, _ := ret[0].(error) return ret0 } -// GetRunningProcess indicates an expected call of GetRunningProcess. -func (mr *MockProcessManagerInterfaceMockRecorder) GetRunningProcess(commandName any) *gomock.Call { +// Attach indicates an expected call of Attach. +func (mr *MockRuntimeBackendInterfaceMockRecorder) Attach(commandName, data any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRunningProcess", reflect.TypeOf((*MockProcessManagerInterface)(nil).GetRunningProcess), commandName) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Attach", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).Attach), commandName, data) } -// GetRunningProcesses mocks base method. -func (m *MockProcessManagerInterface) GetRunningProcesses() map[string]*domain.Process { +// ExpectedPorts mocks base method. +func (m *MockRuntimeBackendInterface) ExpectedPorts(dataRoot string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimePortStatus, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRunningProcesses") - ret0, _ := ret[0].(map[string]*domain.Process) + ret := m.ctrl.Call(m, "ExpectedPorts", dataRoot, commands, globalPorts) + ret0, _ := ret[0].([]domain.RuntimePortStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExpectedPorts indicates an expected call of ExpectedPorts. 
+func (mr *MockRuntimeBackendInterfaceMockRecorder) ExpectedPorts(dataRoot, commands, globalPorts any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExpectedPorts", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).ExpectedPorts), dataRoot, commands, globalPorts) +} + +// Name mocks base method. +func (m *MockRuntimeBackendInterface) Name() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Name") + ret0, _ := ret[0].(string) return ret0 } -// GetRunningProcesses indicates an expected call of GetRunningProcesses. -func (mr *MockProcessManagerInterfaceMockRecorder) GetRunningProcesses() *gomock.Call { +// Name indicates an expected call of Name. +func (mr *MockRuntimeBackendInterfaceMockRecorder) Name() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRunningProcesses", reflect.TypeOf((*MockProcessManagerInterface)(nil).GetRunningProcesses)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).Name)) } -// Run mocks base method. -func (m *MockProcessManagerInterface) Run(commandName string, command []string, dir string) (*int, error) { +// ReadScrollFile mocks base method. +func (m *MockRuntimeBackendInterface) ReadScrollFile(scrollRoot string) ([]byte, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Run", commandName, command, dir) - ret0, _ := ret[0].(*int) + ret := m.ctrl.Call(m, "ReadScrollFile", scrollRoot) + ret0, _ := ret[0].([]byte) ret1, _ := ret[1].(error) return ret0, ret1 } -// Run indicates an expected call of Run. -func (mr *MockProcessManagerInterfaceMockRecorder) Run(commandName, command, dir any) *gomock.Call { +// ReadScrollFile indicates an expected call of ReadScrollFile. 
+func (mr *MockRuntimeBackendInterfaceMockRecorder) ReadScrollFile(scrollRoot any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockProcessManagerInterface)(nil).Run), commandName, command, dir) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadScrollFile", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).ReadScrollFile), scrollRoot) } -// RunTty mocks base method. -func (m *MockProcessManagerInterface) RunTty(comandName string, command []string, dir string) (*int, error) { +// RunCommand mocks base method. +func (m *MockRuntimeBackendInterface) RunCommand(command ports.RuntimeCommand) (*int, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RunTty", comandName, command, dir) + ret := m.ctrl.Call(m, "RunCommand", command) ret0, _ := ret[0].(*int) ret1, _ := ret[1].(error) return ret0, ret1 } -// RunTty indicates an expected call of RunTty. -func (mr *MockProcessManagerInterfaceMockRecorder) RunTty(comandName, command, dir any) *gomock.Call { +// RunCommand indicates an expected call of RunCommand. +func (mr *MockRuntimeBackendInterfaceMockRecorder) RunCommand(command any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunTty", reflect.TypeOf((*MockProcessManagerInterface)(nil).RunTty), comandName, command, dir) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunCommand", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).RunCommand), command) } -// WriteStdin mocks base method. -func (m *MockProcessManagerInterface) WriteStdin(process *domain.Process, data string) error { +// Signal mocks base method. 
+func (m *MockRuntimeBackendInterface) Signal(commandName, target, signal, dataRoot string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteStdin", process, data) + ret := m.ctrl.Call(m, "Signal", commandName, target, signal, dataRoot) ret0, _ := ret[0].(error) return ret0 } -// WriteStdin indicates an expected call of WriteStdin. -func (mr *MockProcessManagerInterfaceMockRecorder) WriteStdin(process, data any) *gomock.Call { +// Signal indicates an expected call of Signal. +func (mr *MockRuntimeBackendInterfaceMockRecorder) Signal(commandName, target, signal, dataRoot any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Signal", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).Signal), commandName, target, signal, dataRoot) +} + +// MockRuntimeMaterializerInterface is a mock of RuntimeMaterializerInterface interface. +type MockRuntimeMaterializerInterface struct { + ctrl *gomock.Controller + recorder *MockRuntimeMaterializerInterfaceMockRecorder + isgomock struct{} +} + +// MockRuntimeMaterializerInterfaceMockRecorder is the mock recorder for MockRuntimeMaterializerInterface. +type MockRuntimeMaterializerInterfaceMockRecorder struct { + mock *MockRuntimeMaterializerInterface +} + +// NewMockRuntimeMaterializerInterface creates a new mock instance. +func NewMockRuntimeMaterializerInterface(ctrl *gomock.Controller) *MockRuntimeMaterializerInterface { + mock := &MockRuntimeMaterializerInterface{ctrl: ctrl} + mock.recorder = &MockRuntimeMaterializerInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockRuntimeMaterializerInterface) EXPECT() *MockRuntimeMaterializerInterfaceMockRecorder { + return m.recorder +} + +// MaterializeScroll mocks base method. 
+func (m *MockRuntimeMaterializerInterface) MaterializeScroll(ctx context.Context, artifact, requestedName string) (*ports.RuntimeMaterialization, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MaterializeScroll", ctx, artifact, requestedName) + ret0, _ := ret[0].(*ports.RuntimeMaterialization) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MaterializeScroll indicates an expected call of MaterializeScroll. +func (mr *MockRuntimeMaterializerInterfaceMockRecorder) MaterializeScroll(ctx, artifact, requestedName any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteStdin", reflect.TypeOf((*MockProcessManagerInterface)(nil).WriteStdin), process, data) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MaterializeScroll", reflect.TypeOf((*MockRuntimeMaterializerInterface)(nil).MaterializeScroll), ctx, artifact, requestedName) } // MockBroadcastChannelInterface is a mock of BroadcastChannelInterface interface. @@ -680,135 +554,6 @@ func (mr *MockConsoleManagerInterfaceMockRecorder) GetConsoles() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConsoles", reflect.TypeOf((*MockConsoleManagerInterface)(nil).GetConsoles)) } -// MockProcessMonitorInterface is a mock of ProcessMonitorInterface interface. -type MockProcessMonitorInterface struct { - ctrl *gomock.Controller - recorder *MockProcessMonitorInterfaceMockRecorder - isgomock struct{} -} - -// MockProcessMonitorInterfaceMockRecorder is the mock recorder for MockProcessMonitorInterface. -type MockProcessMonitorInterfaceMockRecorder struct { - mock *MockProcessMonitorInterface -} - -// NewMockProcessMonitorInterface creates a new mock instance. 
-func NewMockProcessMonitorInterface(ctrl *gomock.Controller) *MockProcessMonitorInterface { - mock := &MockProcessMonitorInterface{ctrl: ctrl} - mock.recorder = &MockProcessMonitorInterfaceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockProcessMonitorInterface) EXPECT() *MockProcessMonitorInterfaceMockRecorder { - return m.recorder -} - -// AddProcess mocks base method. -func (m *MockProcessMonitorInterface) AddProcess(pid int32, name string) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddProcess", pid, name) -} - -// AddProcess indicates an expected call of AddProcess. -func (mr *MockProcessMonitorInterfaceMockRecorder) AddProcess(pid, name any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddProcess", reflect.TypeOf((*MockProcessMonitorInterface)(nil).AddProcess), pid, name) -} - -// GetAllProcessesMetrics mocks base method. -func (m *MockProcessMonitorInterface) GetAllProcessesMetrics() map[string]*domain.ProcessMonitorMetrics { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAllProcessesMetrics") - ret0, _ := ret[0].(map[string]*domain.ProcessMonitorMetrics) - return ret0 -} - -// GetAllProcessesMetrics indicates an expected call of GetAllProcessesMetrics. -func (mr *MockProcessMonitorInterfaceMockRecorder) GetAllProcessesMetrics() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllProcessesMetrics", reflect.TypeOf((*MockProcessMonitorInterface)(nil).GetAllProcessesMetrics)) -} - -// GetPsTrees mocks base method. -func (m *MockProcessMonitorInterface) GetPsTrees() map[string]*domain.ProcessTreeRoot { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPsTrees") - ret0, _ := ret[0].(map[string]*domain.ProcessTreeRoot) - return ret0 -} - -// GetPsTrees indicates an expected call of GetPsTrees. 
-func (mr *MockProcessMonitorInterfaceMockRecorder) GetPsTrees() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPsTrees", reflect.TypeOf((*MockProcessMonitorInterface)(nil).GetPsTrees)) -} - -// RemoveProcess mocks base method. -func (m *MockProcessMonitorInterface) RemoveProcess(name string) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RemoveProcess", name) -} - -// RemoveProcess indicates an expected call of RemoveProcess. -func (mr *MockProcessMonitorInterfaceMockRecorder) RemoveProcess(name any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveProcess", reflect.TypeOf((*MockProcessMonitorInterface)(nil).RemoveProcess), name) -} - -// MockTemplateRendererInterface is a mock of TemplateRendererInterface interface. -type MockTemplateRendererInterface struct { - ctrl *gomock.Controller - recorder *MockTemplateRendererInterfaceMockRecorder - isgomock struct{} -} - -// MockTemplateRendererInterfaceMockRecorder is the mock recorder for MockTemplateRendererInterface. -type MockTemplateRendererInterfaceMockRecorder struct { - mock *MockTemplateRendererInterface -} - -// NewMockTemplateRendererInterface creates a new mock instance. -func NewMockTemplateRendererInterface(ctrl *gomock.Controller) *MockTemplateRendererInterface { - mock := &MockTemplateRendererInterface{ctrl: ctrl} - mock.recorder = &MockTemplateRendererInterfaceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockTemplateRendererInterface) EXPECT() *MockTemplateRendererInterfaceMockRecorder { - return m.recorder -} - -// RenderScrollTemplateFiles mocks base method. 
-func (m *MockTemplateRendererInterface) RenderScrollTemplateFiles(templateBase string, templateFiles []string, data any, ouputPath string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RenderScrollTemplateFiles", templateBase, templateFiles, data, ouputPath) - ret0, _ := ret[0].(error) - return ret0 -} - -// RenderScrollTemplateFiles indicates an expected call of RenderScrollTemplateFiles. -func (mr *MockTemplateRendererInterfaceMockRecorder) RenderScrollTemplateFiles(templateBase, templateFiles, data, ouputPath any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RenderScrollTemplateFiles", reflect.TypeOf((*MockTemplateRendererInterface)(nil).RenderScrollTemplateFiles), templateBase, templateFiles, data, ouputPath) -} - -// RenderTemplate mocks base method. -func (m *MockTemplateRendererInterface) RenderTemplate(templatePath string, data any) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RenderTemplate", templatePath, data) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RenderTemplate indicates an expected call of RenderTemplate. -func (mr *MockTemplateRendererInterfaceMockRecorder) RenderTemplate(templatePath, data any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RenderTemplate", reflect.TypeOf((*MockTemplateRendererInterface)(nil).RenderTemplate), templatePath, data) -} - // MockOciRegistryInterface is a mock of OciRegistryInterface interface. type MockOciRegistryInterface struct { ctrl *gomock.Controller @@ -1060,49 +805,6 @@ func (m *MockPortServiceInterface) EXPECT() *MockPortServiceInterfaceMockRecorde return m.recorder } -// AddPort mocks base method. 
-func (m *MockPortServiceInterface) AddPort(port domain.Port) (*domain.AugmentedPort, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddPort", port) - ret0, _ := ret[0].(*domain.AugmentedPort) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// AddPort indicates an expected call of AddPort. -func (mr *MockPortServiceInterfaceMockRecorder) AddPort(port any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPort", reflect.TypeOf((*MockPortServiceInterface)(nil).AddPort), port) -} - -// CheckOpen mocks base method. -func (m *MockPortServiceInterface) CheckOpen(prot int) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CheckOpen", prot) - ret0, _ := ret[0].(bool) - return ret0 -} - -// CheckOpen indicates an expected call of CheckOpen. -func (mr *MockPortServiceInterfaceMockRecorder) CheckOpen(prot any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckOpen", reflect.TypeOf((*MockPortServiceInterface)(nil).CheckOpen), prot) -} - -// GetLastActivity mocks base method. -func (m *MockPortServiceInterface) GetLastActivity(port int) uint { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetLastActivity", port) - ret0, _ := ret[0].(uint) - return ret0 -} - -// GetLastActivity indicates an expected call of GetLastActivity. -func (mr *MockPortServiceInterfaceMockRecorder) GetLastActivity(port any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastActivity", reflect.TypeOf((*MockPortServiceInterface)(nil).GetLastActivity), port) -} - // GetPorts mocks base method. 
func (m *MockPortServiceInterface) GetPorts() []*domain.AugmentedPort { m.ctrl.T.Helper() @@ -1117,46 +819,6 @@ func (mr *MockPortServiceInterfaceMockRecorder) GetPorts() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPorts", reflect.TypeOf((*MockPortServiceInterface)(nil).GetPorts)) } -// MandatoryPortsOpen mocks base method. -func (m *MockPortServiceInterface) MandatoryPortsOpen() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "MandatoryPortsOpen") - ret0, _ := ret[0].(bool) - return ret0 -} - -// MandatoryPortsOpen indicates an expected call of MandatoryPortsOpen. -func (mr *MockPortServiceInterfaceMockRecorder) MandatoryPortsOpen() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MandatoryPortsOpen", reflect.TypeOf((*MockPortServiceInterface)(nil).MandatoryPortsOpen)) -} - -// RemovePort mocks base method. -func (m *MockPortServiceInterface) RemovePort(port int) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RemovePort", port) - ret0, _ := ret[0].(error) - return ret0 -} - -// RemovePort indicates an expected call of RemovePort. -func (mr *MockPortServiceInterfaceMockRecorder) RemovePort(port any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemovePort", reflect.TypeOf((*MockPortServiceInterface)(nil).RemovePort), port) -} - -// StartMonitoring mocks base method. -func (m *MockPortServiceInterface) StartMonitoring(arg0 context.Context, arg1 []string, arg2 uint) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "StartMonitoring", arg0, arg1, arg2) -} - -// StartMonitoring indicates an expected call of StartMonitoring. 
-func (mr *MockPortServiceInterfaceMockRecorder) StartMonitoring(arg0, arg1, arg2 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartMonitoring", reflect.TypeOf((*MockPortServiceInterface)(nil).StartMonitoring), arg0, arg1, arg2) -} - // MockColdStarterHandlerInterface is a mock of ColdStarterHandlerInterface interface. type MockColdStarterHandlerInterface struct { ctrl *gomock.Controller @@ -1535,55 +1197,3 @@ func (mr *MockWatchServiceInterfaceMockRecorder) Unsubscribe(client any) *gomock mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unsubscribe", reflect.TypeOf((*MockWatchServiceInterface)(nil).Unsubscribe), client) } - -// MockNixDependencyServiceInterface is a mock of NixDependencyServiceInterface interface. -type MockNixDependencyServiceInterface struct { - ctrl *gomock.Controller - recorder *MockNixDependencyServiceInterfaceMockRecorder - isgomock struct{} -} - -// MockNixDependencyServiceInterfaceMockRecorder is the mock recorder for MockNixDependencyServiceInterface. -type MockNixDependencyServiceInterfaceMockRecorder struct { - mock *MockNixDependencyServiceInterface -} - -// NewMockNixDependencyServiceInterface creates a new mock instance. -func NewMockNixDependencyServiceInterface(ctrl *gomock.Controller) *MockNixDependencyServiceInterface { - mock := &MockNixDependencyServiceInterface{ctrl: ctrl} - mock.recorder = &MockNixDependencyServiceInterfaceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockNixDependencyServiceInterface) EXPECT() *MockNixDependencyServiceInterfaceMockRecorder { - return m.recorder -} - -// EnsureNixInstalled mocks base method. 
-func (m *MockNixDependencyServiceInterface) EnsureNixInstalled() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "EnsureNixInstalled") - ret0, _ := ret[0].(error) - return ret0 -} - -// EnsureNixInstalled indicates an expected call of EnsureNixInstalled. -func (mr *MockNixDependencyServiceInterfaceMockRecorder) EnsureNixInstalled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureNixInstalled", reflect.TypeOf((*MockNixDependencyServiceInterface)(nil).EnsureNixInstalled)) -} - -// GetCommand mocks base method. -func (m *MockNixDependencyServiceInterface) GetCommand(cmd, deps []string) []string { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCommand", cmd, deps) - ret0, _ := ret[0].([]string) - return ret0 -} - -// GetCommand indicates an expected call of GetCommand. -func (mr *MockNixDependencyServiceInterfaceMockRecorder) GetCommand(cmd, deps any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCommand", reflect.TypeOf((*MockNixDependencyServiceInterface)(nil).GetCommand), cmd, deps) -} diff --git a/test/utils/daemon_http_api.go b/test/utils/daemon_http_api.go deleted file mode 100644 index f5cf6e9e..00000000 --- a/test/utils/daemon_http_api.go +++ /dev/null @@ -1,55 +0,0 @@ -package test_utils - -import ( - "encoding/json" - "errors" - "log" - "time" - - "github.com/highcard-dev/daemon/internal/api" - "github.com/highcard-dev/daemon/internal/core/domain" -) - -func WaitForConsoleRunning(console string, duration time.Duration) error { - - timeout := time.After(duration) - - ticker := time.NewTicker(1 * time.Second) - for { - select { - case <-timeout: - return errors.New("timeout waiting for console to start") - case <-ticker.C: - body, err := FetchBytes("http://localhost:8081/api/v1/consoles") - if err != nil { - continue - } - - var resp api.ConsolesResponse - - json.Unmarshal(body, &resp) - - consoles := resp.Consoles - - if _, ok := 
consoles[console]; ok { - return nil - } else { - keys := make([]string, 0, len(consoles)) - for k := range consoles { - keys = append(keys, k) - } - log.Printf("console %s not found, found: %v", console, keys) - } - } - } -} - -func FetchPorts() ([]domain.AugmentedPort, error) { - body, err := FetchBytes("http://localhost:8081/api/v1/ports") - if err != nil { - return nil, err - } - var ap []domain.AugmentedPort - json.Unmarshal(body, &ap) - return ap, nil -} diff --git a/test/utils/setup_serve_commands.go b/test/utils/setup_serve_commands.go deleted file mode 100644 index a22c63a6..00000000 --- a/test/utils/setup_serve_commands.go +++ /dev/null @@ -1,79 +0,0 @@ -package test_utils - -import ( - "bytes" - "context" - "fmt" - "testing" - "time" - - "github.com/highcard-dev/daemon/cmd" - "github.com/highcard-dev/daemon/internal/utils/logger" - "github.com/spf13/cobra" -) - -func StartAndTestServeCommand(ctx context.Context, t *testing.T, rootCmd *cobra.Command) (bool, error) { - - connectedChan := make(chan struct{}, 1) - executionDoneChan := make(chan error, 1) - - go func() { - ticker := time.NewTicker(1 * time.Second) - defer ticker.Stop() - for { - select { - case <-ticker.C: - if CheckHttpServer(8081, time.Second*20) == nil { - connectedChan <- struct{}{} - return - } - case <-ctx.Done(): - return - } - } - }() - - go func(ctx context.Context) { - cmd.ServeCommand.SetContext(ctx) - - err := rootCmd.ExecuteContext(ctx) - - if err != nil { - executionDoneChan <- err - return - } - - executionDoneChan <- nil - }(ctx) - - select { - case <-connectedChan: - t.Logf("Connected to server") - return true, nil - case err := <-executionDoneChan: - t.Logf("Execution done") - return false, err - } -} - -func SetupServeCmd(ctx context.Context, t *testing.T, cwd string, additionalArgs []string) { - - args := append([]string{"--cwd", cwd, "serve"}, additionalArgs...) 
- - b := bytes.NewBufferString("") - - serveCmd := cmd.RootCmd - serveCmd.SetErr(b) - serveCmd.SetOut(b) - serveCmd.SetArgs(args) - // Create a new context for each test case - - cmd.ServeCommand.SetContext(ctx) - - logger.Log().Info(fmt.Sprintf("Running serve command with args: %v", args)) - - connected, err := StartAndTestServeCommand(ctx, t, serveCmd) - if !connected { - t.Fatalf("Failed to connect to daemon web server: %v", err) - } -} diff --git a/test/utils/utils.go b/test/utils/utils.go index 41a4445c..7049a1cc 100644 --- a/test/utils/utils.go +++ b/test/utils/utils.go @@ -4,23 +4,8 @@ import ( "errors" "net" "time" - - mock_ports "github.com/highcard-dev/daemon/test/mock" - "go.uber.org/mock/gomock" ) -var processMonitor *mock_ports.MockProcessMonitorInterface - -func GetMockedProcessMonitor(ctrl *gomock.Controller) *mock_ports.MockProcessMonitorInterface { - if processMonitor == nil { - processMonitor = mock_ports.NewMockProcessMonitorInterface(ctrl) - } - processMonitor.EXPECT().AddProcess(gomock.Any(), gomock.Any()).AnyTimes() - processMonitor.EXPECT().RemoveProcess(gomock.Any()).AnyTimes() - processMonitor.EXPECT().GetAllProcessesMetrics().AnyTimes() - return processMonitor -} - func ConnectionTest(testAddress string, checkOnline bool) error { doneConnecting := make(chan error) From 6e4faebcf3bae630c9964942e538bf46778b2687 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc=20Schottst=C3=A4dt?= Date: Sun, 10 May 2026 02:09:12 +0200 Subject: [PATCH 2/6] feat: move runtime ownership to druid cli --- .github/workflows/build.yml | 5 +- .github/workflows/pr.yml | 5 +- .gitignore | 4 +- Dockerfile.testing | 4 +- Makefile | 27 +- api/openapi.yaml | 359 +++ apps/druid-client/adapters/cli/create.go | 31 +- apps/druid-client/adapters/cli/lifecycle.go | 41 + apps/druid-client/adapters/cli/push.go | 3 - .../adapters/cli/push_category.go | 5 - apps/druid-client/adapters/cli/register.go | 2 +- apps/druid-client/adapters/cli/root.go | 3 + 
apps/druid-client/adapters/cli/routing.go | 95 + .../adapters/daemon/openapi_client.go | 50 +- .../adapters/daemon/openapi_client_test.go | 40 + .../druid-client/core/ports/runtime_daemon.go | 6 +- .../core/services/runtime_service.go | 20 +- apps/druid/adapters/cli/root_test.go | 8 +- apps/druid/adapters/cli/serve.go | 64 +- apps/druid/adapters/http/handlers/routes.go | 20 + .../adapters/http/handlers/routes_test.go | 48 + .../adapters/http/handlers/scroll_handler.go | 351 ++- .../http/handlers/scroll_handler_test.go | 30 + .../http/handlers/websocket_handler.go | 11 + .../druid/core/services/runtime_controller.go | 410 ++- .../core/services/runtime_controller_test.go | 120 + config/helm-charts/druid-cli/Chart.yaml | 6 + config/helm-charts/druid-cli/chart_test.go | 83 + .../druid-cli/templates/_helpers.tpl | 37 + .../druid-cli/templates/deployment.yaml | 124 + .../druid-cli/templates/ingress.yaml | 35 + .../druid-cli/templates/networkpolicy.yaml | 25 + .../helm-charts/druid-cli/templates/pvc.yaml | 18 + .../helm-charts/druid-cli/templates/rbac.yaml | 41 + .../druid-cli/templates/service.yaml | 19 + .../druid-cli/templates/serviceaccount.yaml | 12 + config/helm-charts/druid-cli/values.yaml | 89 + internal/api/generated.go | 2387 ++++++++++++++--- internal/core/domain/runtime_scroll.go | 46 +- internal/core/domain/scroll.go | 8 + internal/core/ports/services_ports.go | 29 +- internal/core/services/procedure_launcher.go | 71 +- .../core/services/procedure_launcher_test.go | 139 + internal/core/services/queue_manager.go | 3 + internal/core/services/queue_manager_test.go | 2 + internal/core/services/runtime_env.go | 130 + .../core/services/runtime_scroll_manager.go | 23 + .../services/runtime_scroll_manager_test.go | 37 + internal/core/services/runtime_state_store.go | 53 +- internal/runtime/docker/backend.go | 117 +- internal/runtime/kubernetes/backend.go | 352 ++- internal/runtime/kubernetes/resources.go | 51 +- internal/runtime/kubernetes/resources_test.go | 211 +- 
internal/runtime/kubernetes/state_store.go | 18 + internal/runtime/runtime_test.go | 60 + test/integration/docker/docker_cli_test.go | 121 + test/integration/example_test.go | 3 +- test/integration/internal/e2e/harness.go | 396 +++ .../kubernetes/kubernetes_cli_test.go | 339 +++ 59 files changed, 6380 insertions(+), 467 deletions(-) create mode 100644 apps/druid-client/adapters/cli/lifecycle.go create mode 100644 apps/druid-client/adapters/cli/routing.go create mode 100644 apps/druid-client/adapters/daemon/openapi_client_test.go create mode 100644 apps/druid/adapters/http/handlers/routes_test.go create mode 100644 apps/druid/adapters/http/handlers/scroll_handler_test.go create mode 100644 config/helm-charts/druid-cli/Chart.yaml create mode 100644 config/helm-charts/druid-cli/chart_test.go create mode 100644 config/helm-charts/druid-cli/templates/_helpers.tpl create mode 100644 config/helm-charts/druid-cli/templates/deployment.yaml create mode 100644 config/helm-charts/druid-cli/templates/ingress.yaml create mode 100644 config/helm-charts/druid-cli/templates/networkpolicy.yaml create mode 100644 config/helm-charts/druid-cli/templates/pvc.yaml create mode 100644 config/helm-charts/druid-cli/templates/rbac.yaml create mode 100644 config/helm-charts/druid-cli/templates/service.yaml create mode 100644 config/helm-charts/druid-cli/templates/serviceaccount.yaml create mode 100644 config/helm-charts/druid-cli/values.yaml create mode 100644 internal/core/services/runtime_env.go create mode 100644 test/integration/docker/docker_cli_test.go create mode 100644 test/integration/internal/e2e/harness.go create mode 100644 test/integration/kubernetes/kubernetes_cli_test.go diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1a4b016d..d1db12dc 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,8 +10,9 @@ jobs: - uses: actions/setup-go@v5 with: go-version: "^1.24" - - run: make test-integration-docker - name: Run integration 
tests inside Docker + - uses: azure/setup-kubectl@v4 + - run: make test-integration + name: Run CLI backend integration tests - run: make test name: Unit tests diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index 80381787..1173de7e 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -20,8 +20,9 @@ jobs: - uses: actions/setup-go@v5 with: go-version: "^1.24" - - run: make test-integration-docker - name: Run integration tests inside Docker + - uses: azure/setup-kubectl@v4 + - run: make test-integration + name: Run CLI backend integration tests validate-api: runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index ba96ce1b..6ce3fc18 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,8 @@ druid** !apps/ !apps/** +!config/ +!config/** dlv.log .DS_Store @@ -18,4 +20,4 @@ druid-cli-test !.docker/** .env -.runtime-state \ No newline at end of file +.runtime-state diff --git a/Dockerfile.testing b/Dockerfile.testing index 23d8e494..6c99ec3d 100644 --- a/Dockerfile.testing +++ b/Dockerfile.testing @@ -5,7 +5,7 @@ WORKDIR /app RUN apt update && apt install -y ca-certificates wget jq moreutils htop procps nano net-tools gcc make openjdk-17-jdk ant netcat-traditional -RUN wget https://go.dev/dl/go1.21.6.linux-$(dpkg --print-architecture).tar.gz -O go.tar.gz +RUN wget https://go.dev/dl/go1.24.7.linux-$(dpkg --print-architecture).tar.gz -O go.tar.gz RUN tar -C /usr/local -xzf go.tar.gz && rm go.tar.gz #/root/go/bin is not in the path @@ -18,4 +18,4 @@ RUN go install github.com/go-delve/delve/cmd/dlv@latest COPY go.mod go.sum ./ # Download all dependencies. 
Dependencies will be cached if the go.mod and go.sum files are not changed -RUN go mod download \ No newline at end of file +RUN go mod download diff --git a/Makefile b/Makefile index f297bcc7..c60b7ee1 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,11 @@ -.PHONY: test build build-coldstarter-image +.PHONY: test build build-coldstarter-image test-integration test-integration-docker test-integration-kubernetes kind-integration-up kind-integration-down VERSION ?= "dev" COLDSTARTER_IMAGE ?= druid-coldstarter:local +INTEGRATION_TIMEOUT ?= 1200s +KIND_CLUSTER ?= druid-cli-integration +KIND_VERSION ?= v0.27.0 +GO_BIN ?= $(shell go env GOPATH)/bin generate-api: ## Generate API types from OpenAPI spec @echo "Generating API types from OpenAPI spec..." @@ -49,14 +53,23 @@ test-docker: docker run -v ./:/app --entrypoint=/bin/bash --rm druid-cli-test -c "go test -v ./..." -test-integration: - go test -timeout 1200s -tags=integration ./test/integration +test-integration: test-integration-docker test-integration-kubernetes test-integration-docker: - docker build . -f Dockerfile.testing -t druid-cli-test - docker run -v ./:/app --entrypoint=/bin/bash --rm druid-cli-test -c "go test -timeout 1200s -tags=integration -v ./test/integration" - docker run -v ./:/app --entrypoint=/bin/bash --rm druid-cli-test -c "go test -timeout 1200s -tags=integration -v ./test/integration/commands" + go test -count=1 -timeout $(INTEGRATION_TIMEOUT) -tags='integration docker' -v ./test/integration/docker + +test-integration-kubernetes: kind-integration-up + go test -count=1 -timeout $(INTEGRATION_TIMEOUT) -tags='integration kubernetes' -v ./test/integration/kubernetes + +kind-integration-up: + @command -v kind >/dev/null 2>&1 || (echo "Installing kind $(KIND_VERSION)..." && go install sigs.k8s.io/kind@$(KIND_VERSION)) + @PATH="$(GO_BIN):$$PATH"; if ! 
kind get clusters | grep -qx "$(KIND_CLUSTER)"; then kind create cluster --name "$(KIND_CLUSTER)" --wait 120s; fi + @PATH="$(GO_BIN):$$PATH"; kind export kubeconfig --name "$(KIND_CLUSTER)" >/dev/null + @kubectl config use-context "kind-$(KIND_CLUSTER)" >/dev/null + +kind-integration-down: + @PATH="$(GO_BIN):$$PATH"; kind delete cluster --name "$(KIND_CLUSTER)" test-integration-docker-debug: docker build . -f Dockerfile.testing -t druid-cli-test - docker run -v ./:/app --entrypoint=/bin/bash --rm -p 2345:2345 -it druid-cli-test -c "dlv --listen=:2345 --headless=true --log=true --log-output=debugger,debuglineerr,gdbwire,lldbout,rpc --accept-multiclient --api-version=2 test ./test/integration/commands" + docker run -v ./:/app -v /var/run/docker.sock:/var/run/docker.sock --entrypoint=/bin/bash --rm -p 2345:2345 -it druid-cli-test -c "dlv --listen=:2345 --headless=true --log=true --log-output=debugger,debuglineerr,gdbwire,lldbout,rpc --accept-multiclient --api-version=2 test --build-flags='-tags=integration docker' ./test/integration/docker" diff --git a/api/openapi.yaml b/api/openapi.yaml index d253a29d..34bc8632 100644 --- a/api/openapi.yaml +++ b/api/openapi.yaml @@ -92,6 +92,110 @@ components: data_root: type: string description: Optional daemon-local path or backend ref containing runtime data directory. If omitted, a materializing runtime backend may pull the artifact. 
+ start: + type: boolean + default: true + + EnsureScrollRequest: + type: object + required: + - artifact + properties: + id: + type: string + name: + type: string + artifact: + type: string + scroll_root: + type: string + data_root: + type: string + start: + type: boolean + default: true + + RuntimeRoutingTarget: + type: object + required: + - name + - procedure + - port_name + - port + - protocol + - service_name + - service_port + properties: + name: + type: string + procedure: + type: string + port_name: + type: string + port: + type: integer + protocol: + type: string + namespace: + type: string + service_name: + type: string + service_port: + type: integer + selector: + type: object + additionalProperties: + type: string + + RuntimeRouteAssignment: + type: object + properties: + name: + type: string + port_name: + type: string + host: + type: string + external_ip: + type: string + public_port: + type: integer + url: + type: string + protocol: + type: string + + ApplyRoutingRequest: + type: object + required: + - assignments + properties: + assignments: + type: array + items: + $ref: '#/components/schemas/RuntimeRouteAssignment' + + RuntimeArtifactOperationRequest: + type: object + required: + - artifact + properties: + artifact: + type: string + restart: + type: boolean + default: false + + CommandStatusMap: + type: object + additionalProperties: true + + ScrollLogMap: + type: object + additionalProperties: + type: array + items: + type: string RuntimeScroll: type: object required: @@ -119,6 +223,12 @@ components: status: type: string enum: [created, running, stopped, error, deleted] + last_error: + type: string + routing: + type: array + items: + $ref: '#/components/schemas/RuntimeRouteAssignment' created_at: type: string format: date-time @@ -223,6 +333,25 @@ paths: schema: $ref: '#/components/schemas/RuntimeScroll' + /api/v1/scrolls/ensure: + post: + operationId: ensureScroll + summary: Ensure runtime scroll exists and optionally starts + tags: [runtime, 
daemon] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/EnsureScrollRequest' + responses: + '200': + description: Runtime scroll ensured + content: + application/json: + schema: + $ref: '#/components/schemas/RuntimeScroll' + /api/v1/scrolls/{id}: get: operationId: getScroll @@ -263,6 +392,44 @@ paths: '404': description: Runtime scroll not found + /api/v1/scrolls/{id}/start: + post: + operationId: startScroll + summary: Start runtime scroll + tags: [runtime, daemon] + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '200': + description: Started runtime scroll + content: + application/json: + schema: + $ref: '#/components/schemas/RuntimeScroll' + + /api/v1/scrolls/{id}/stop: + post: + operationId: stopScroll + summary: Stop runtime scroll workloads while preserving data + tags: [runtime, daemon] + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '200': + description: Stopped runtime scroll + content: + application/json: + schema: + $ref: '#/components/schemas/RuntimeScroll' + /api/v1/scrolls/{id}/commands/{command}: post: operationId: runScrollCommand @@ -289,6 +456,102 @@ paths: '404': description: Runtime scroll not found + /api/v1/scrolls/{id}/config: + get: + operationId: getScrollConfig + summary: Get parsed scroll config + tags: [runtime, daemon] + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '200': + description: Parsed scroll config + content: + application/json: + schema: + type: object + + /api/v1/scrolls/{id}/queue: + get: + operationId: getScrollQueue + summary: Get runtime queue state + tags: [queue, runtime] + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '200': + description: Queue state + content: + application/json: + schema: + $ref: '#/components/schemas/CommandStatusMap' + + /api/v1/scrolls/{id}/procedures: + 
get: + operationId: getScrollProcedures + summary: Get procedure state + tags: [runtime, daemon] + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '200': + description: Procedure state + content: + application/json: + schema: + $ref: '#/components/schemas/CommandStatusMap' + + /api/v1/scrolls/{id}/consoles: + get: + operationId: getScrollConsoles + summary: Get scroll-scoped consoles + tags: [runtime, daemon] + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '200': + description: Consoles keyed by procedure + content: + application/json: + schema: + type: object + additionalProperties: true + + /api/v1/scrolls/{id}/logs: + get: + operationId: getScrollLogs + summary: Get scroll-scoped logs + tags: [logs, runtime] + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '200': + description: Logs keyed by procedure + content: + application/json: + schema: + $ref: '#/components/schemas/ScrollLogMap' + /api/v1/scrolls/{id}/ports: get: operationId: getScrollPorts @@ -312,6 +575,102 @@ paths: '404': description: Runtime scroll not found + /api/v1/scrolls/{id}/routing/targets: + get: + operationId: getScrollRoutingTargets + summary: Get stable backend routing targets + tags: [runtime, port] + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '200': + description: Routing targets + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/RuntimeRoutingTarget' + + /api/v1/scrolls/{id}/routing: + post: + operationId: applyScrollRouting + summary: Persist operator-assigned public routing + tags: [runtime, port] + parameters: + - name: id + in: path + required: true + schema: + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ApplyRoutingRequest' + responses: + '200': + description: Updated runtime scroll + 
content: + application/json: + schema: + $ref: '#/components/schemas/RuntimeScroll' + + /api/v1/scrolls/{id}/backup: + post: + operationId: backupScroll + summary: Execute runtime backup + tags: [runtime, daemon] + parameters: + - name: id + in: path + required: true + schema: + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/RuntimeArtifactOperationRequest' + responses: + '200': + description: Backup completed + content: + application/json: + schema: + $ref: '#/components/schemas/RuntimeScroll' + + /api/v1/scrolls/{id}/restore: + post: + operationId: restoreScroll + summary: Execute runtime restore + tags: [runtime, daemon] + parameters: + - name: id + in: path + required: true + schema: + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/RuntimeArtifactOperationRequest' + responses: + '200': + description: Restore completed + content: + application/json: + schema: + $ref: '#/components/schemas/RuntimeScroll' + # Health Endpoint /api/v1/health: get: diff --git a/apps/druid-client/adapters/cli/create.go b/apps/druid-client/adapters/cli/create.go index 4e0a6456..2fdaf54a 100644 --- a/apps/druid-client/adapters/cli/create.go +++ b/apps/druid-client/adapters/cli/create.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "github.com/highcard-dev/daemon/apps/druid-client/adapters/daemon" "github.com/highcard-dev/daemon/internal/core/domain" @@ -19,6 +20,7 @@ func (a *App) createCmd() *cobra.Command { var scrollRoot string var dataRoot string var noData bool + var noStart bool cmd := &cobra.Command{ Use: "create [name]", Short: "Create a scroll through the daemon", @@ -36,8 +38,8 @@ func (a *App) createCmd() *cobra.Command { } stateDir = defaultStateDir } - if (scrollRoot == "") != (dataRoot == "") { - return fmt.Errorf("--scroll-root and --data-root must be provided together") + if (scrollRoot == "") != (dataRoot == "") || 
(scrollRoot != "" && scrollRoot != dataRoot) { + return fmt.Errorf("--scroll-root and --data-root are legacy flags and must be omitted or equal") } service, err := a.runtimeService() @@ -46,12 +48,17 @@ func (a *App) createCmd() *cobra.Command { } if scrollRoot != "" { - if err := coreservices.MaterializeScrollArtifact(artifact, scrollRoot, dataRoot, registry.NewOciClient(a.loadRegistryStore()), !noData); err != nil { - return err + if strings.Contains(scrollRoot, "://") { + dataRoot = scrollRoot + } else { + if err := coreservices.MaterializeScrollArtifact(artifact, scrollRoot, scrollRoot, registry.NewOciClient(a.loadRegistryStore()), !noData); err != nil { + return err + } + dataRoot = scrollRoot } } else { if !localArtifactExists(artifact) { - scroll, err := service.Create(cmd.Context(), name, artifact, "", "") + scroll, err := service.Create(cmd.Context(), name, artifact, "", "", !noStart) if err == nil { return printJSON(scroll) } @@ -70,12 +77,11 @@ func (a *App) createCmd() *cobra.Command { } defer os.RemoveAll(tmpDir) - stagedScrollRoot := filepath.Join(tmpDir, "spec") - stagedDataRoot := filepath.Join(tmpDir, "data") - if err := coreservices.MaterializeScrollArtifact(artifact, stagedScrollRoot, stagedDataRoot, registry.NewOciClient(a.loadRegistryStore()), !noData); err != nil { + stagedRoot := filepath.Join(tmpDir, "root") + if err := coreservices.MaterializeScrollArtifact(artifact, stagedRoot, stagedRoot, registry.NewOciClient(a.loadRegistryStore()), !noData); err != nil { return err } - stagedScroll, err := domain.NewScroll(stagedScrollRoot) + stagedScroll, err := domain.NewScroll(stagedRoot) if err != nil { return err } @@ -84,13 +90,13 @@ func (a *App) createCmd() *cobra.Command { return err } scrollRoot = store.ScrollRoot(id) - dataRoot = store.DataRoot(id) - if err := coreservices.MoveMaterializedScroll(stagedScrollRoot, stagedDataRoot, scrollRoot, dataRoot); err != nil { + dataRoot = scrollRoot + if err := 
coreservices.MoveMaterializedScroll(stagedRoot, stagedRoot, scrollRoot, dataRoot); err != nil { return err } } - scroll, err := service.Create(cmd.Context(), name, artifact, scrollRoot, dataRoot) + scroll, err := service.Create(cmd.Context(), name, artifact, scrollRoot, dataRoot, !noStart) if err != nil { return err } @@ -101,6 +107,7 @@ func (a *App) createCmd() *cobra.Command { cmd.Flags().StringVar(&scrollRoot, "scroll-root", "", "Daemon-local path containing materialized scroll spec") cmd.Flags().StringVar(&dataRoot, "data-root", "", "Daemon-local path containing runtime data") cmd.Flags().BoolVar(&noData, "no-data", false, "Skip scroll data files") + cmd.Flags().BoolVar(&noStart, "no-start", false, "Create the scroll without starting its serve command") return cmd } diff --git a/apps/druid-client/adapters/cli/lifecycle.go b/apps/druid-client/adapters/cli/lifecycle.go new file mode 100644 index 00000000..2126b50f --- /dev/null +++ b/apps/druid-client/adapters/cli/lifecycle.go @@ -0,0 +1,41 @@ +package cli + +import "github.com/spf13/cobra" + +func (a *App) startCmd() *cobra.Command { + return &cobra.Command{ + Use: "start ", + Short: "Start the daemon-managed scroll serve command", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + service, err := a.runtimeService() + if err != nil { + return err + } + scroll, err := service.Start(cmd.Context(), args[0]) + if err != nil { + return err + } + return printJSON(scroll) + }, + } +} + +func (a *App) stopCmd() *cobra.Command { + return &cobra.Command{ + Use: "stop ", + Short: "Stop daemon-managed runtime workloads for a scroll", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + service, err := a.runtimeService() + if err != nil { + return err + } + scroll, err := service.Stop(cmd.Context(), args[0]) + if err != nil { + return err + } + return printJSON(scroll) + }, + } +} diff --git a/apps/druid-client/adapters/cli/push.go 
b/apps/druid-client/adapters/cli/push.go index fe63a677..ec7889b1 100644 --- a/apps/druid-client/adapters/cli/push.go +++ b/apps/druid-client/adapters/cli/push.go @@ -30,9 +30,6 @@ func (a *App) pushCmd() *cobra.Command { Args: cobra.MaximumNArgs(2), RunE: func(cmd *cobra.Command, args []string) error { credStore := a.loadRegistryStore() - if !credStore.HasCredentials() { - return fmt.Errorf("no registry credentials configured. Please use `druid-client login` to set them") - } fullPath := currentWorkingDir() artifact := "" diff --git a/apps/druid-client/adapters/cli/push_category.go b/apps/druid-client/adapters/cli/push_category.go index 9b807853..20bb783f 100644 --- a/apps/druid-client/adapters/cli/push_category.go +++ b/apps/druid-client/adapters/cli/push_category.go @@ -1,8 +1,6 @@ package cli import ( - "fmt" - "github.com/highcard-dev/daemon/internal/core/services/registry" "github.com/highcard-dev/daemon/internal/utils/logger" "github.com/spf13/cobra" @@ -18,9 +16,6 @@ func (a *App) pushCategoryCmd() *cobra.Command { Args: cobra.RangeArgs(2, 3), RunE: func(cmd *cobra.Command, args []string) error { credStore := a.loadRegistryStore() - if !credStore.HasCredentials() { - return fmt.Errorf("no registry credentials configured. 
Please use `druid-client login` to set them") - } repo := args[0] category := args[1] diff --git a/apps/druid-client/adapters/cli/register.go b/apps/druid-client/adapters/cli/register.go index d0bcf5d1..0fffa878 100644 --- a/apps/druid-client/adapters/cli/register.go +++ b/apps/druid-client/adapters/cli/register.go @@ -37,7 +37,7 @@ func (a *App) registerCmd() *cobra.Command { if err != nil { return err } - scroll, err := service.Create(cmd.Context(), name, scrollRoot, scrollRoot, scrollRoot) + scroll, err := service.Create(cmd.Context(), name, scrollRoot, scrollRoot, scrollRoot, true) if err != nil { return err } diff --git a/apps/druid-client/adapters/cli/root.go b/apps/druid-client/adapters/cli/root.go index 165e46cc..cb80ffad 100644 --- a/apps/druid-client/adapters/cli/root.go +++ b/apps/druid-client/adapters/cli/root.go @@ -37,7 +37,10 @@ func NewRootCommand() *cobra.Command { cmd.AddCommand(app.describeCmd()) cmd.AddCommand(app.deleteCmd()) cmd.AddCommand(app.runCmd()) + cmd.AddCommand(app.startCmd()) + cmd.AddCommand(app.stopCmd()) cmd.AddCommand(app.portsCmd()) + cmd.AddCommand(app.routingCmd()) cmd.AddCommand(app.attachCmd()) cmd.AddCommand(app.pullCmd()) cmd.AddCommand(app.pushCmd()) diff --git a/apps/druid-client/adapters/cli/routing.go b/apps/druid-client/adapters/cli/routing.go new file mode 100644 index 00000000..b0e47e0f --- /dev/null +++ b/apps/druid-client/adapters/cli/routing.go @@ -0,0 +1,95 @@ +package cli + +import ( + "encoding/json" + "fmt" + "io" + "os" + + "github.com/highcard-dev/daemon/internal/api" + "github.com/spf13/cobra" +) + +func (a *App) routingCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "routing", + Short: "Inspect or apply daemon runtime routing", + } + cmd.AddCommand(a.routingTargetsCmd()) + cmd.AddCommand(a.routingApplyCmd()) + return cmd +} + +func (a *App) routingTargetsCmd() *cobra.Command { + return &cobra.Command{ + Use: "targets ", + Short: "Show backend service targets for a scroll", + Args: 
cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + service, err := a.runtimeService() + if err != nil { + return err + } + targets, err := service.RoutingTargets(cmd.Context(), args[0]) + if err != nil { + return err + } + return printJSON(targets) + }, + } +} + +func (a *App) routingApplyCmd() *cobra.Command { + var file string + cmd := &cobra.Command{ + Use: "apply ", + Short: "Persist assigned public routing for a scroll", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if file == "" { + return fmt.Errorf("--file is required") + } + payload, err := readRoutingAssignments(file) + if err != nil { + return err + } + service, err := a.runtimeService() + if err != nil { + return err + } + scroll, err := service.ApplyRouting(cmd.Context(), args[0], payload.Assignments) + if err != nil { + return err + } + return printJSON(scroll) + }, + } + cmd.Flags().StringVarP(&file, "file", "f", "", "JSON file with an assignments array, or '-' for stdin") + return cmd +} + +type routingAssignmentsPayload struct { + Assignments []api.RuntimeRouteAssignment `json:"assignments"` +} + +func readRoutingAssignments(file string) (routingAssignmentsPayload, error) { + var data []byte + var err error + if file == "-" { + data, err = io.ReadAll(os.Stdin) + } else { + data, err = os.ReadFile(file) + } + if err != nil { + return routingAssignmentsPayload{}, err + } + var payload routingAssignmentsPayload + if err := json.Unmarshal(data, &payload); err == nil && payload.Assignments != nil { + return payload, nil + } + var assignments []api.RuntimeRouteAssignment + if err := json.Unmarshal(data, &assignments); err != nil { + return routingAssignmentsPayload{}, err + } + return routingAssignmentsPayload{Assignments: assignments}, nil +} diff --git a/apps/druid-client/adapters/daemon/openapi_client.go b/apps/druid-client/adapters/daemon/openapi_client.go index 116bea84..ba9ba302 100644 --- 
a/apps/druid-client/adapters/daemon/openapi_client.go +++ b/apps/druid-client/adapters/daemon/openapi_client.go @@ -34,7 +34,7 @@ func NewOpenAPIClient(daemonSocket string) (*OpenAPIClient, error) { return &OpenAPIClient{client: client}, nil } -func (c *OpenAPIClient) CreateScroll(ctx context.Context, name string, artifact string, scrollRoot string, dataRoot string) (*api.RuntimeScroll, error) { +func (c *OpenAPIClient) CreateScroll(ctx context.Context, name string, artifact string, scrollRoot string, dataRoot string, start bool) (*api.RuntimeScroll, error) { var requestName *string if name != "" { requestName = &name @@ -52,6 +52,7 @@ func (c *OpenAPIClient) CreateScroll(ctx context.Context, name string, artifact Name: requestName, ScrollRoot: requestScrollRoot, DataRoot: requestDataRoot, + Start: &start, }) if err != nil { return nil, err @@ -126,6 +127,53 @@ func (c *OpenAPIClient) GetScrollPorts(ctx context.Context, id string) ([]api.Ru return *res.JSON200, nil } +func (c *OpenAPIClient) StartScroll(ctx context.Context, id string) (*api.RuntimeScroll, error) { + res, err := c.client.StartScrollWithResponse(ctx, id) + if err != nil { + return nil, err + } + if err := ensureStatus(res.StatusCode(), res.Body); err != nil { + return nil, err + } + return res.JSON200, nil +} + +func (c *OpenAPIClient) StopScroll(ctx context.Context, id string) (*api.RuntimeScroll, error) { + res, err := c.client.StopScrollWithResponse(ctx, id) + if err != nil { + return nil, err + } + if err := ensureStatus(res.StatusCode(), res.Body); err != nil { + return nil, err + } + return res.JSON200, nil +} + +func (c *OpenAPIClient) GetScrollRoutingTargets(ctx context.Context, id string) ([]api.RuntimeRoutingTarget, error) { + res, err := c.client.GetScrollRoutingTargetsWithResponse(ctx, id) + if err != nil { + return nil, err + } + if err := ensureStatus(res.StatusCode(), res.Body); err != nil { + return nil, err + } + if res.JSON200 == nil { + return nil, nil + } + return *res.JSON200, 
nil +} + +func (c *OpenAPIClient) ApplyScrollRouting(ctx context.Context, id string, assignments []api.RuntimeRouteAssignment) (*api.RuntimeScroll, error) { + res, err := c.client.ApplyScrollRoutingWithResponse(ctx, id, api.ApplyRoutingRequest{Assignments: assignments}) + if err != nil { + return nil, err + } + if err := ensureStatus(res.StatusCode(), res.Body); err != nil { + return nil, err + } + return res.JSON200, nil +} + func ensureStatus(statusCode int, body []byte) error { if statusCode < 400 { return nil diff --git a/apps/druid-client/adapters/daemon/openapi_client_test.go b/apps/druid-client/adapters/daemon/openapi_client_test.go new file mode 100644 index 00000000..223b4077 --- /dev/null +++ b/apps/druid-client/adapters/daemon/openapi_client_test.go @@ -0,0 +1,40 @@ +package daemon + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/highcard-dev/daemon/internal/api" +) + +func TestCreateScrollSendsStartFalse(t *testing.T) { + var got struct { + Start *bool `json:"start"` + } + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/api/v1/scrolls" { + t.Fatalf("path = %s, want /api/v1/scrolls", r.URL.Path) + } + if err := json.NewDecoder(r.Body).Decode(&got); err != nil { + t.Fatal(err) + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + _, _ = w.Write([]byte(`{"id":"scroll-a","artifact":"artifact","scroll_root":"/root","data_root":"/root","scroll_name":"scroll","status":"created","created_at":"2026-05-10T00:00:00Z","updated_at":"2026-05-10T00:00:00Z"}`)) + })) + defer server.Close() + client, err := api.NewClientWithResponses(server.URL) + if err != nil { + t.Fatal(err) + } + openAPIClient := &OpenAPIClient{client: client} + + if _, err := openAPIClient.CreateScroll(t.Context(), "scroll-a", "artifact", "", "", false); err != nil { + t.Fatal(err) + } + if got.Start == nil || *got.Start { + t.Fatalf("start = %#v, 
want false", got.Start) + } +} diff --git a/apps/druid-client/core/ports/runtime_daemon.go b/apps/druid-client/core/ports/runtime_daemon.go index cb174cfd..16b5182b 100644 --- a/apps/druid-client/core/ports/runtime_daemon.go +++ b/apps/druid-client/core/ports/runtime_daemon.go @@ -7,12 +7,16 @@ import ( ) type RuntimeDaemon interface { - CreateScroll(ctx context.Context, name string, artifact string, scrollRoot string, dataRoot string) (*api.RuntimeScroll, error) + CreateScroll(ctx context.Context, name string, artifact string, scrollRoot string, dataRoot string, start bool) (*api.RuntimeScroll, error) ListScrolls(ctx context.Context) ([]api.RuntimeScroll, error) GetScroll(ctx context.Context, id string) (*api.RuntimeScroll, error) DeleteScroll(ctx context.Context, id string) (*api.DeletedScroll, error) RunScrollCommand(ctx context.Context, id string, command string) (*api.RuntimeScroll, error) GetScrollPorts(ctx context.Context, id string) ([]api.RuntimePortStatus, error) + StartScroll(ctx context.Context, id string) (*api.RuntimeScroll, error) + StopScroll(ctx context.Context, id string) (*api.RuntimeScroll, error) + GetScrollRoutingTargets(ctx context.Context, id string) ([]api.RuntimeRoutingTarget, error) + ApplyScrollRouting(ctx context.Context, id string, assignments []api.RuntimeRouteAssignment) (*api.RuntimeScroll, error) } type ConsoleAttacher interface { diff --git a/apps/druid-client/core/services/runtime_service.go b/apps/druid-client/core/services/runtime_service.go index 2ab00b9c..027e9d6e 100644 --- a/apps/druid-client/core/services/runtime_service.go +++ b/apps/druid-client/core/services/runtime_service.go @@ -15,8 +15,8 @@ func NewRuntimeService(daemon ports.RuntimeDaemon) *RuntimeService { return &RuntimeService{daemon: daemon} } -func (s *RuntimeService) Create(ctx context.Context, name string, artifact string, scrollRoot string, dataRoot string) (*api.RuntimeScroll, error) { - return s.daemon.CreateScroll(ctx, name, artifact, scrollRoot, 
dataRoot) +func (s *RuntimeService) Create(ctx context.Context, name string, artifact string, scrollRoot string, dataRoot string, start bool) (*api.RuntimeScroll, error) { + return s.daemon.CreateScroll(ctx, name, artifact, scrollRoot, dataRoot, start) } func (s *RuntimeService) List(ctx context.Context) ([]api.RuntimeScroll, error) { @@ -38,3 +38,19 @@ func (s *RuntimeService) Run(ctx context.Context, id string, command string) (*a func (s *RuntimeService) Ports(ctx context.Context, id string) ([]api.RuntimePortStatus, error) { return s.daemon.GetScrollPorts(ctx, id) } + +func (s *RuntimeService) Start(ctx context.Context, id string) (*api.RuntimeScroll, error) { + return s.daemon.StartScroll(ctx, id) +} + +func (s *RuntimeService) Stop(ctx context.Context, id string) (*api.RuntimeScroll, error) { + return s.daemon.StopScroll(ctx, id) +} + +func (s *RuntimeService) RoutingTargets(ctx context.Context, id string) ([]api.RuntimeRoutingTarget, error) { + return s.daemon.GetScrollRoutingTargets(ctx, id) +} + +func (s *RuntimeService) ApplyRouting(ctx context.Context, id string, assignments []api.RuntimeRouteAssignment) (*api.RuntimeScroll, error) { + return s.daemon.ApplyScrollRouting(ctx, id, assignments) +} diff --git a/apps/druid/adapters/cli/root_test.go b/apps/druid/adapters/cli/root_test.go index c30bc66e..fafda90e 100644 --- a/apps/druid/adapters/cli/root_test.go +++ b/apps/druid/adapters/cli/root_test.go @@ -10,14 +10,16 @@ func TestRootCommandDoesNotExposeOCICommands(t *testing.T) { } } -func TestServeCommandIsSocketOnly(t *testing.T) { +func TestServeCommandExposesRuntimeListeners(t *testing.T) { for _, name := range []string{"tcp", "port"} { if flag := ServeCommand.Flags().Lookup(name); flag != nil { t.Fatalf("druid serve should not expose --%s", name) } } - if flag := ServeCommand.Flags().Lookup("socket"); flag == nil { - t.Fatal("druid serve should expose --socket") + for _, name := range []string{"socket", "listen", "public-listen", "internal-token"} { + 
if flag := ServeCommand.Flags().Lookup(name); flag == nil { + t.Fatalf("druid serve should expose --%s", name) + } } } diff --git a/apps/druid/adapters/cli/serve.go b/apps/druid/adapters/cli/serve.go index 04069f73..72e0388f 100644 --- a/apps/druid/adapters/cli/serve.go +++ b/apps/druid/adapters/cli/serve.go @@ -4,6 +4,7 @@ import ( "net" "os" "path/filepath" + "strings" "github.com/gofiber/fiber/v2" runtimehandlers "github.com/highcard-dev/daemon/apps/druid/adapters/http/handlers" @@ -24,6 +25,9 @@ var k8sPullImage string var k8sRegistrySecret string var hubbleRelayAddr string var k8sKubeconfig string +var runtimeListen string +var runtimePublicListen string +var runtimeInternalToken string var ServeCommand = &cobra.Command{ Use: "serve", @@ -36,6 +40,9 @@ var ServeCommand = &cobra.Command{ func init() { ServeCommand.Flags().StringVar(&runtimeSocket, "socket", utils.DefaultRuntimeSocketPath(), "Runtime daemon Unix socket path") + ServeCommand.Flags().StringVar(&runtimeListen, "listen", "", "Optional management HTTP listen address, for example :8081") + ServeCommand.Flags().StringVar(&runtimePublicListen, "public-listen", "", "Optional public dashboard HTTP listen address, for example :8082") + ServeCommand.Flags().StringVar(&runtimeInternalToken, "internal-token", "", "Optional bearer token required for management HTTP API requests") ServeCommand.Flags().StringVar(&runtimeStateDir, "state-dir", "", "Runtime state directory (default: ~/.druid/runtime)") ServeCommand.Flags().StringVar(&runtimeBackend, "runtime", "docker", "Default runtime backend. 
Valid values: docker, kubernetes") ServeCommand.Flags().StringVar(&k8sNamespace, "k8s-namespace", "", "Kubernetes namespace for runtime resources (default: service account namespace or DRUID_K8S_NAMESPACE)") @@ -67,16 +74,63 @@ func runRuntimeDaemon() error { return err } - app := fiber.New(fiber.Config{DisableStartupMessage: true}) - runtimehandlers.RegisterRoutes(app, runtimehandlers.RouteHandlers{ + if runtimeInternalToken == "" { + runtimeInternalToken = os.Getenv("DRUID_INTERNAL_TOKEN") + } + handlers := runtimehandlers.RouteHandlers{ Server: runtimehandlers.NewRuntimeServer( runtimehandlers.NewHealthHandler(), - runtimehandlers.NewScrollHandler(supervisor), + runtimehandlers.NewScrollHandler(supervisor, consoleService, logManager), ), Websocket: runtimehandlers.NewWebsocketHandler(consoleService), - }) + } - return listenRuntimeDaemon(app, store.StateDir()) + managementApp := fiber.New(fiber.Config{DisableStartupMessage: true}) + if runtimeInternalToken != "" { + managementApp.Use(func(c *fiber.Ctx) error { + path := c.Path() + if path == "/health" || path == "/api/v1/health" { + return c.Next() + } + token := strings.TrimPrefix(c.Get("Authorization"), "Bearer ") + if token == "" { + token = c.Get("X-Druid-Internal-Token") + } + if token != runtimeInternalToken { + return fiber.NewError(fiber.StatusUnauthorized, "invalid internal runtime token") + } + return c.Next() + }) + } + runtimehandlers.RegisterManagementRoutes(managementApp, handlers) + + var publicApp *fiber.App + if runtimePublicListen != "" { + publicApp = fiber.New(fiber.Config{DisableStartupMessage: true}) + runtimehandlers.RegisterPublicRoutes(publicApp, handlers) + } + return listenRuntimeHTTP(managementApp, publicApp, store.StateDir()) +} + +func listenRuntimeHTTP(managementApp *fiber.App, publicApp *fiber.App, stateDir string) error { + errCh := make(chan error, 2) + if runtimeListen != "" { + go func() { + logger.Log().Info("Starting runtime management listener", zap.String("listen", 
runtimeListen), zap.String("stateDir", stateDir)) + errCh <- managementApp.Listen(runtimeListen) + }() + } else { + go func() { + errCh <- listenRuntimeDaemon(managementApp, stateDir) + }() + } + if publicApp != nil { + go func() { + logger.Log().Info("Starting runtime public listener", zap.String("listen", runtimePublicListen), zap.String("stateDir", stateDir)) + errCh <- publicApp.Listen(runtimePublicListen) + }() + } + return <-errCh } func listenRuntimeDaemon(app *fiber.App, stateDir string) error { diff --git a/apps/druid/adapters/http/handlers/routes.go b/apps/druid/adapters/http/handlers/routes.go index ba9373b6..2ed32cb5 100644 --- a/apps/druid/adapters/http/handlers/routes.go +++ b/apps/druid/adapters/http/handlers/routes.go @@ -21,7 +21,27 @@ func NewRuntimeServer(health *HealthHandler, scrolls *ScrollHandler) *RuntimeSer } func RegisterRoutes(app *fiber.App, handlers RouteHandlers) { + RegisterManagementRoutes(app, handlers) + RegisterPublicRoutes(app, handlers) +} + +func RegisterManagementRoutes(app *fiber.App, handlers RouteHandlers) { api.RegisterHandlersWithOptions(app, handlers.Server, api.FiberServerOptions{}) app.Get("/health", handlers.Server.GetHealthAuth) app.Get("/ws/v1/scrolls/:id/consoles/:console", websocket.New(handlers.Websocket.AttachConsole)) } + +func RegisterPublicRoutes(app *fiber.App, handlers RouteHandlers) { + app.Get("/health", handlers.Server.GetHealthAuth) + app.Get("/:id/ws/v1/serve/:console", websocket.New(handlers.Websocket.AttachScrollConsole)) + app.Get("/:id/api/v1/health", handlers.Server.GetHealthAuth) + app.Get("/:id/api/v1/scroll", handlers.Server.GetDaemonScroll) + app.Post("/:id/api/v1/command", handlers.Server.RunDaemonCommand) + app.Get("/:id/api/v1/queue", handlers.Server.GetDaemonQueue) + app.Get("/:id/api/v1/procedures", handlers.Server.GetDaemonProcedures) + app.Get("/:id/api/v1/consoles", handlers.Server.GetDaemonConsoles) + app.Get("/:id/api/v1/logs", handlers.Server.GetDaemonLogs) + 
app.Get("/:id/api/v1/logs/:stream", handlers.Server.GetDaemonStreamLogs) + app.Get("/:id/api/v1/ports", handlers.Server.GetDaemonPorts) + app.All("/:id/webdav/*", handlers.Server.ServeDaemonWebDAV) +} diff --git a/apps/druid/adapters/http/handlers/routes_test.go b/apps/druid/adapters/http/handlers/routes_test.go new file mode 100644 index 00000000..c2486836 --- /dev/null +++ b/apps/druid/adapters/http/handlers/routes_test.go @@ -0,0 +1,48 @@ +package handlers + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/gofiber/fiber/v2" +) + +func TestRouteSplitKeepsManagementAndPublicSurfacesSeparate(t *testing.T) { + handlers := RouteHandlers{Server: NewRuntimeServer(NewHealthHandler(), nil), Websocket: &WebsocketHandler{}} + + management := fiber.New(fiber.Config{DisableStartupMessage: true}) + RegisterManagementRoutes(management, handlers) + if status := requestStatus(t, management, "/api/v1/health"); status != http.StatusOK { + t.Fatalf("management health status = %d, want 200", status) + } + if status := requestStatus(t, management, "/scroll-1/api/v1/health"); status != http.StatusNotFound { + t.Fatalf("management public health status = %d, want 404", status) + } + + public := fiber.New(fiber.Config{DisableStartupMessage: true}) + RegisterPublicRoutes(public, handlers) + if status := requestStatus(t, public, "/scroll-1/api/v1/health"); status != http.StatusOK { + t.Fatalf("public health status = %d, want 200", status) + } + if status := requestStatus(t, public, "/api/v1/scrolls"); status != http.StatusNotFound { + t.Fatalf("public management list status = %d, want 404", status) + } + if status := requestStatus(t, public, "/scroll-1/api/v1/token"); status != http.StatusNotFound { + t.Fatalf("public token compatibility route status = %d, want 404", status) + } + if status := requestStatus(t, public, "/scroll-1/api/v1/watch/status"); status != http.StatusNotFound { + t.Fatalf("public watch compatibility route status = %d, want 404", status) + } +} 
+ +func requestStatus(t *testing.T, app *fiber.App, path string) int { + t.Helper() + req := httptest.NewRequest(http.MethodGet, path, nil) + resp, err := app.Test(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + return resp.StatusCode +} diff --git a/apps/druid/adapters/http/handlers/scroll_handler.go b/apps/druid/adapters/http/handlers/scroll_handler.go index 360ba717..75bd968e 100644 --- a/apps/druid/adapters/http/handlers/scroll_handler.go +++ b/apps/druid/adapters/http/handlers/scroll_handler.go @@ -2,6 +2,11 @@ package handlers import ( "errors" + "mime" + "os" + "path/filepath" + "strconv" + "strings" "github.com/gofiber/fiber/v2" appservices "github.com/highcard-dev/daemon/apps/druid/core/services" @@ -11,15 +16,37 @@ import ( ) type ScrollHandler struct { - supervisor *appservices.RuntimeSupervisor + supervisor *appservices.RuntimeSupervisor + consoleService *services.ConsoleManager + logService *services.LogManager } -func NewScrollHandler(supervisor *appservices.RuntimeSupervisor) *ScrollHandler { +func NewScrollHandler(supervisor *appservices.RuntimeSupervisor, consoleService *services.ConsoleManager, logService *services.LogManager) *ScrollHandler { return &ScrollHandler{ - supervisor: supervisor, + supervisor: supervisor, + consoleService: consoleService, + logService: logService, } } +func runtimeRoots(scrollRoot *string, dataRoot *string) (string, string, error) { + scroll := "" + if scrollRoot != nil { + scroll = *scrollRoot + } + data := "" + if dataRoot != nil { + data = *dataRoot + } + if scroll == "" && data == "" { + return "", "", nil + } + if scroll == "" || data == "" || scroll != data { + return "", "", errors.New("scroll_root and data_root are legacy fields and must be omitted or equal") + } + return scroll, scroll, nil +} + func (h *ScrollHandler) ListScrolls(c *fiber.Ctx) error { scrolls, err := h.supervisor.List() if err != nil { @@ -39,15 +66,15 @@ func (h *ScrollHandler) CreateScroll(c *fiber.Ctx) error { } else 
if request.Id != nil && *request.Id != "" { name = *request.Id } - scrollRoot := "" - if request.ScrollRoot != nil { - scrollRoot = *request.ScrollRoot + scrollRoot, dataRoot, err := runtimeRoots(request.ScrollRoot, request.DataRoot) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, err.Error()) } - dataRoot := "" - if request.DataRoot != nil { - dataRoot = *request.DataRoot + start := true + if request.Start != nil { + start = *request.Start } - runtimeScroll, err := h.supervisor.Create(request.Artifact, name, scrollRoot, dataRoot) + runtimeScroll, err := h.supervisor.Create(request.Artifact, name, scrollRoot, dataRoot, start) if err != nil { if errors.Is(err, services.ErrScrollAlreadyExists) { return fiber.NewError(fiber.StatusConflict, err.Error()) @@ -60,6 +87,42 @@ func (h *ScrollHandler) CreateScroll(c *fiber.Ctx) error { return c.Status(fiber.StatusCreated).JSON(runtimeScroll) } +func (h *ScrollHandler) EnsureScroll(c *fiber.Ctx) error { + var request struct { + ID *string `json:"id"` + Name *string `json:"name"` + Artifact string `json:"artifact"` + ScrollRoot *string `json:"scroll_root"` + DataRoot *string `json:"data_root"` + Start *bool `json:"start"` + } + if err := c.BodyParser(&request); err != nil { + return fiber.NewError(fiber.StatusBadRequest, err.Error()) + } + name := "" + if request.Name != nil && *request.Name != "" { + name = *request.Name + } else if request.ID != nil && *request.ID != "" { + name = *request.ID + } + scrollRoot, dataRoot, err := runtimeRoots(request.ScrollRoot, request.DataRoot) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, err.Error()) + } + start := true + if request.Start != nil { + start = *request.Start + } + runtimeScroll, err := h.supervisor.Ensure(request.Artifact, name, scrollRoot, dataRoot, start) + if err != nil { + if errors.Is(err, appservices.ErrRuntimeMaterializationUnsupported) { + return fiber.NewError(fiber.StatusNotImplemented, err.Error()) + } + return err + } + return 
c.JSON(runtimeScroll) +} + func (h *ScrollHandler) GetScroll(c *fiber.Ctx, id string) error { runtimeScroll, err := h.getScroll(id) if err != nil { @@ -73,7 +136,7 @@ func (h *ScrollHandler) DeleteScroll(c *fiber.Ctx, id string) error { if err != nil { return err } - if err := h.supervisor.Delete(id); err != nil { + if err := h.supervisor.DeleteWithPolicy(id, c.QueryBool("purge_data", false)); err != nil { return err } return c.JSON(api.DeletedScroll{ @@ -82,6 +145,28 @@ func (h *ScrollHandler) DeleteScroll(c *fiber.Ctx, id string) error { }) } +func (h *ScrollHandler) StartScroll(c *fiber.Ctx, id string) error { + if _, err := h.getScroll(id); err != nil { + return err + } + runtimeScroll, err := h.supervisor.StartScroll(id) + if err != nil { + return err + } + return c.JSON(runtimeScroll) +} + +func (h *ScrollHandler) StopScroll(c *fiber.Ctx, id string) error { + if _, err := h.getScroll(id); err != nil { + return err + } + runtimeScroll, err := h.supervisor.Stop(id) + if err != nil { + return err + } + return c.JSON(runtimeScroll) +} + func (h *ScrollHandler) RunScrollCommand(c *fiber.Ctx, id string, command string) error { runtimeScroll, err := h.getScroll(id) if err != nil { @@ -94,6 +179,157 @@ func (h *ScrollHandler) RunScrollCommand(c *fiber.Ctx, id string, command string return c.JSON(updated) } +func (h *ScrollHandler) GetScrollConfig(c *fiber.Ctx, id string) error { + if _, err := h.getScroll(id); err != nil { + return err + } + scrollFile, err := h.supervisor.ScrollFile(id) + if err != nil { + return err + } + return c.JSON(scrollFile) +} + +func (h *ScrollHandler) GetScrollQueue(c *fiber.Ctx, id string) error { + if _, err := h.getScroll(id); err != nil { + return err + } + queue, err := h.supervisor.Queue(id) + if err != nil { + return err + } + return c.JSON(queue) +} + +func (h *ScrollHandler) GetScrollProcedures(c *fiber.Ctx, id string) error { + if _, err := h.getScroll(id); err != nil { + return err + } + procedures, err := 
h.supervisor.Procedures(id) + if err != nil { + return err + } + return c.JSON(procedures) +} + +func (h *ScrollHandler) GetScrollConsoles(c *fiber.Ctx, id string) error { + if _, err := h.getScroll(id); err != nil { + return err + } + prefix := id + "/" + consoles := map[string]*domain.Console{} + for consoleID, console := range h.consoleService.GetConsoles() { + if strings.HasPrefix(consoleID, prefix) { + consoles[strings.TrimPrefix(consoleID, prefix)] = console + } + } + return c.JSON(consoles) +} + +func (h *ScrollHandler) GetScrollLogs(c *fiber.Ctx, id string) error { + logs, err := h.scrollLogs(id) + if err != nil { + return err + } + return c.JSON(logs) +} + +func (h *ScrollHandler) GetDaemonScroll(c *fiber.Ctx) error { + return h.GetScrollConfig(c, c.Params("id")) +} + +func (h *ScrollHandler) RunDaemonCommand(c *fiber.Ctx) error { + var request struct { + Command string `json:"command"` + } + if err := c.BodyParser(&request); err != nil { + return fiber.NewError(fiber.StatusBadRequest, err.Error()) + } + if request.Command == "" { + return fiber.NewError(fiber.StatusBadRequest, "command is required") + } + if _, err := h.supervisor.Run(c.Params("id"), request.Command); err != nil { + return err + } + return c.SendStatus(fiber.StatusOK) +} + +func (h *ScrollHandler) GetDaemonQueue(c *fiber.Ctx) error { + return h.GetScrollQueue(c, c.Params("id")) +} + +func (h *ScrollHandler) GetDaemonProcedures(c *fiber.Ctx) error { + return h.GetScrollProcedures(c, c.Params("id")) +} + +func (h *ScrollHandler) GetDaemonConsoles(c *fiber.Ctx) error { + return h.GetScrollConsoles(c, c.Params("id")) +} + +func (h *ScrollHandler) GetDaemonLogs(c *fiber.Ctx) error { + logs, err := h.scrollLogs(c.Params("id")) + if err != nil { + return err + } + streams := make([]map[string]any, 0, len(logs)) + for stream, log := range logs { + streams = append(streams, map[string]any{"stream": stream, "log": log}) + } + return c.JSON(streams) +} + +func (h *ScrollHandler) 
GetDaemonStreamLogs(c *fiber.Ctx) error { + logs, err := h.scrollLogs(c.Params("id")) + if err != nil { + return err + } + stream := c.Params("stream") + return c.JSON(map[string]any{"stream": stream, "log": logs[stream]}) +} + +func (h *ScrollHandler) GetDaemonPorts(c *fiber.Ctx) error { + return h.GetScrollPorts(c, c.Params("id")) +} + +func (h *ScrollHandler) ServeDaemonWebDAV(c *fiber.Ctx) error { + c.Set("DAV", "1") + c.Set("Allow", "OPTIONS, GET, HEAD, PUT") + if c.Method() == fiber.MethodOptions { + return c.SendStatus(fiber.StatusNoContent) + } + relativePath := strings.TrimPrefix(c.Params("*"), "/") + if c.Method() == fiber.MethodPut { + if err := h.supervisor.WriteDataFile(c.Params("id"), relativePath, c.Body()); err != nil { + if errors.Is(err, appservices.ErrRuntimeOperationUnsupported) { + return fiber.NewError(fiber.StatusNotImplemented, err.Error()) + } + return err + } + return c.SendStatus(fiber.StatusNoContent) + } + if c.Method() != fiber.MethodGet && c.Method() != fiber.MethodHead { + return fiber.NewError(fiber.StatusMethodNotAllowed, "unsupported runtime WebDAV method") + } + data, err := h.supervisor.DataFile(c.Params("id"), relativePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) || strings.Contains(err.Error(), "No such file") { + return fiber.NewError(fiber.StatusNotFound, err.Error()) + } + if errors.Is(err, appservices.ErrRuntimeOperationUnsupported) { + return fiber.NewError(fiber.StatusNotImplemented, err.Error()) + } + return err + } + if contentType := mime.TypeByExtension(filepath.Ext(relativePath)); contentType != "" { + c.Set(fiber.HeaderContentType, contentType) + } + c.Set(fiber.HeaderContentLength, strconv.Itoa(len(data))) + if c.Method() == fiber.MethodHead { + return c.SendStatus(fiber.StatusOK) + } + return c.Send(data) +} + func (h *ScrollHandler) GetScrollPorts(c *fiber.Ctx, id string) error { runtimeScroll, err := h.getScroll(id) if err != nil { @@ -106,6 +342,78 @@ func (h *ScrollHandler) GetScrollPorts(c 
*fiber.Ctx, id string) error { return c.JSON(statuses) } +func (h *ScrollHandler) GetScrollRoutingTargets(c *fiber.Ctx, id string) error { + if _, err := h.getScroll(id); err != nil { + return err + } + targets, err := h.supervisor.RoutingTargets(id) + if err != nil { + if errors.Is(err, appservices.ErrRuntimeOperationUnsupported) { + return fiber.NewError(fiber.StatusNotImplemented, err.Error()) + } + return err + } + return c.JSON(targets) +} + +func (h *ScrollHandler) ApplyScrollRouting(c *fiber.Ctx, id string) error { + if _, err := h.getScroll(id); err != nil { + return err + } + var request struct { + Assignments []domain.RuntimeRouteAssignment `json:"assignments"` + } + if err := c.BodyParser(&request); err != nil { + return fiber.NewError(fiber.StatusBadRequest, err.Error()) + } + runtimeScroll, err := h.supervisor.ApplyRouting(id, request.Assignments) + if err != nil { + return err + } + return c.JSON(runtimeScroll) +} + +func (h *ScrollHandler) BackupScroll(c *fiber.Ctx, id string) error { + if _, err := h.getScroll(id); err != nil { + return err + } + var request struct { + Artifact string `json:"artifact"` + } + if err := c.BodyParser(&request); err != nil { + return fiber.NewError(fiber.StatusBadRequest, err.Error()) + } + runtimeScroll, err := h.supervisor.Backup(id, request.Artifact) + if err != nil { + if errors.Is(err, appservices.ErrRuntimeOperationUnsupported) { + return fiber.NewError(fiber.StatusNotImplemented, err.Error()) + } + return err + } + return c.JSON(runtimeScroll) +} + +func (h *ScrollHandler) RestoreScroll(c *fiber.Ctx, id string) error { + if _, err := h.getScroll(id); err != nil { + return err + } + var request struct { + Artifact string `json:"artifact"` + Restart bool `json:"restart"` + } + if err := c.BodyParser(&request); err != nil { + return fiber.NewError(fiber.StatusBadRequest, err.Error()) + } + runtimeScroll, err := h.supervisor.Restore(id, request.Artifact, request.Restart) + if err != nil { + if errors.Is(err, 
appservices.ErrRuntimeOperationUnsupported) { + return fiber.NewError(fiber.StatusNotImplemented, err.Error()) + } + return err + } + return c.JSON(runtimeScroll) +} + func (h *ScrollHandler) getScroll(id string) (*domain.RuntimeScroll, error) { runtimeScroll, err := h.supervisor.Get(id) if errors.Is(err, services.ErrScrollNotFound) { @@ -113,3 +421,24 @@ func (h *ScrollHandler) getScroll(id string) (*domain.RuntimeScroll, error) { } return runtimeScroll, err } + +func (h *ScrollHandler) scrollLogs(id string) (map[string][]string, error) { + if _, err := h.getScroll(id); err != nil { + return nil, err + } + prefix := id + "/" + logs := map[string][]string{} + for streamID, log := range h.logService.GetStreams() { + if !strings.HasPrefix(streamID, prefix) { + continue + } + response := make(chan []byte, 100) + log.Req <- response + lines := []string{} + for line := range response { + lines = append(lines, string(line)) + } + logs[strings.TrimPrefix(streamID, prefix)] = lines + } + return logs, nil +} diff --git a/apps/druid/adapters/http/handlers/scroll_handler_test.go b/apps/druid/adapters/http/handlers/scroll_handler_test.go new file mode 100644 index 00000000..cdf9fa2b --- /dev/null +++ b/apps/druid/adapters/http/handlers/scroll_handler_test.go @@ -0,0 +1,30 @@ +package handlers + +import "testing" + +func TestRuntimeRootsAcceptsOmittedOrEqualLegacyRoots(t *testing.T) { + scrollRoot, dataRoot, err := runtimeRoots(nil, nil) + if err != nil { + t.Fatal(err) + } + if scrollRoot != "" || dataRoot != "" { + t.Fatalf("roots = %q/%q, want empty", scrollRoot, dataRoot) + } + + root := "/runtime/root" + scrollRoot, dataRoot, err = runtimeRoots(&root, &root) + if err != nil { + t.Fatal(err) + } + if scrollRoot != root || dataRoot != root { + t.Fatalf("roots = %q/%q, want %q", scrollRoot, dataRoot, root) + } +} + +func TestRuntimeRootsRejectsSplitLegacyRoots(t *testing.T) { + scrollRoot := "/runtime/spec" + dataRoot := "/runtime/data" + if _, _, err := 
runtimeRoots(&scrollRoot, &dataRoot); err == nil { + t.Fatal("expected split roots to fail") + } +} diff --git a/apps/druid/adapters/http/handlers/websocket_handler.go b/apps/druid/adapters/http/handlers/websocket_handler.go index 6dbda07b..0875565d 100644 --- a/apps/druid/adapters/http/handlers/websocket_handler.go +++ b/apps/druid/adapters/http/handlers/websocket_handler.go @@ -19,6 +19,17 @@ func NewWebsocketHandler(consoleService *services.ConsoleManager) *WebsocketHand func (h *WebsocketHandler) AttachConsole(c *websocket.Conn) { consoleID := c.Params("console") + if id := c.Params("id"); id != "" { + consoleID = id + "/" + consoleID + } + h.attach(c, consoleID) +} + +func (h *WebsocketHandler) AttachScrollConsole(c *websocket.Conn) { + h.AttachConsole(c) +} + +func (h *WebsocketHandler) attach(c *websocket.Conn, consoleID string) { defer c.Close() console := h.consoleService.GetConsole(consoleID) diff --git a/apps/druid/core/services/runtime_controller.go b/apps/druid/core/services/runtime_controller.go index a705afeb..7e6f6e4c 100644 --- a/apps/druid/core/services/runtime_controller.go +++ b/apps/druid/core/services/runtime_controller.go @@ -22,6 +22,7 @@ import ( ) var ErrRuntimeMaterializationUnsupported = errors.New("runtime backend does not support daemon materialization") +var ErrRuntimeOperationUnsupported = errors.New("runtime backend does not support this operation") var newKubernetesRuntimeStore = func(config runtimekubernetes.Config) (coreservices.RuntimeScrollStore, error) { return runtimekubernetes.NewConfigMapStateStore(config) @@ -114,7 +115,11 @@ func (s *RuntimeSupervisor) Start() error { return nil } -func (s *RuntimeSupervisor) Create(artifact string, name string, scrollRoot string, dataRoot string) (*domain.RuntimeScroll, error) { +func (s *RuntimeSupervisor) Create(artifact string, name string, scrollRoot string, dataRoot string, start bool) (*domain.RuntimeScroll, error) { + return s.create(artifact, name, scrollRoot, dataRoot, start) +} 
+ +func (s *RuntimeSupervisor) create(artifact string, name string, scrollRoot string, dataRoot string, autoStart bool) (*domain.RuntimeScroll, error) { runtimeService, err := runtimebackend.NewBackend(s.runtimeBackend, s.consoleService, runtimebackend.WithKubernetesConfig(s.runtimeOptions.Kubernetes)) if err != nil { return nil, err @@ -148,12 +153,37 @@ func (s *RuntimeSupervisor) Create(artifact string, name string, scrollRoot stri session, err := s.startSession(runtimeScroll) if err != nil { runtimeScroll.Status = domain.RuntimeScrollStatusError + runtimeScroll.LastError = err.Error() _ = s.store.UpdateScroll(runtimeScroll) return nil, err } - if err := session.AutoStartServe(); err != nil { - runtimeScroll.Status = domain.RuntimeScrollStatusError - _ = s.store.UpdateScroll(runtimeScroll) + if autoStart { + if err := session.AutoStartServe(); err != nil { + runtimeScroll.Status = domain.RuntimeScrollStatusError + runtimeScroll.LastError = err.Error() + _ = s.store.UpdateScroll(runtimeScroll) + return nil, err + } + } + return runtimeScroll, nil +} + +func (s *RuntimeSupervisor) Ensure(artifact string, name string, scrollRoot string, dataRoot string, start bool) (*domain.RuntimeScroll, error) { + id := coreservices.RuntimeScrollIDFromName(name) + if id != "" { + runtimeScroll, err := s.store.GetScroll(id) + if err == nil { + if start { + return s.StartScroll(runtimeScroll.ID) + } + return runtimeScroll, nil + } + if !errors.Is(err, coreservices.ErrScrollNotFound) { + return nil, err + } + } + runtimeScroll, err := s.create(artifact, name, scrollRoot, dataRoot, start) + if err != nil { return nil, err } return runtimeScroll, nil @@ -168,16 +198,78 @@ func (s *RuntimeSupervisor) Get(id string) (*domain.RuntimeScroll, error) { } func (s *RuntimeSupervisor) Delete(id string) error { + return s.DeleteWithPolicy(id, false) +} + +func (s *RuntimeSupervisor) DeleteWithPolicy(id string, purgeData bool) error { s.mu.Lock() session := s.sessions[id] delete(s.sessions, id) 
s.mu.Unlock() - if session != nil { - session.Shutdown() + if session == nil { + var err error + session, err = s.sessionFor(id) + if err != nil { + return err + } + s.mu.Lock() + delete(s.sessions, id) + s.mu.Unlock() + } + if err := session.DeleteRuntime(purgeData); err != nil { + return err } + session.Shutdown() return s.store.DeleteScroll(id) } +func (s *RuntimeSupervisor) StartScroll(id string) (*domain.RuntimeScroll, error) { + session, err := s.sessionFor(id) + if err != nil { + return nil, err + } + if err := session.AutoStartServe(); err != nil { + session.markError(err) + return nil, err + } + session.mu.Lock() + session.runtimeScroll.Status = deriveRuntimeScrollStatus(session.runtimeScroll.Commands, session.scrollService.GetFile().Commands) + if session.runtimeScroll.Status == domain.RuntimeScrollStatusCreated { + session.runtimeScroll.Status = domain.RuntimeScrollStatusRunning + } + session.runtimeScroll.LastError = "" + err = s.store.UpdateScroll(session.runtimeScroll) + id = session.runtimeScroll.ID + session.mu.Unlock() + if err != nil { + return nil, err + } + return s.store.GetScroll(id) +} + +func (s *RuntimeSupervisor) Stop(id string) (*domain.RuntimeScroll, error) { + s.mu.Lock() + session := s.sessions[id] + delete(s.sessions, id) + s.mu.Unlock() + if session == nil { + var err error + session, err = s.sessionFor(id) + if err != nil { + return nil, err + } + s.mu.Lock() + delete(s.sessions, id) + s.mu.Unlock() + } + if err := session.StopRuntime(); err != nil { + session.markError(err) + return nil, err + } + session.Shutdown() + return s.store.GetScroll(id) +} + func (s *RuntimeSupervisor) Run(id string, command string) (*domain.RuntimeScroll, error) { session, err := s.sessionFor(id) if err != nil { @@ -194,6 +286,105 @@ func (s *RuntimeSupervisor) Ports(id string) ([]domain.RuntimePortStatus, error) return session.Ports() } +func (s *RuntimeSupervisor) RoutingTargets(id string) ([]domain.RuntimeRoutingTarget, error) { + session, err := 
s.sessionFor(id) + if err != nil { + return nil, err + } + return session.RoutingTargets() +} + +func (s *RuntimeSupervisor) ApplyRouting(id string, assignments []domain.RuntimeRouteAssignment) (*domain.RuntimeScroll, error) { + session, err := s.sessionFor(id) + if err != nil { + return nil, err + } + return session.ApplyRouting(assignments) +} + +func (s *RuntimeSupervisor) Backup(id string, artifact string) (*domain.RuntimeScroll, error) { + session, err := s.sessionFor(id) + if err != nil { + return nil, err + } + if err := session.Backup(context.Background(), artifact); err != nil { + session.markError(err) + return nil, err + } + return s.store.GetScroll(id) +} + +func (s *RuntimeSupervisor) Restore(id string, artifact string, restart bool) (*domain.RuntimeScroll, error) { + session, err := s.sessionFor(id) + if err != nil { + return nil, err + } + if err := session.Restore(context.Background(), artifact); err != nil { + session.markError(err) + return nil, err + } + if restart { + return s.StartScroll(id) + } + return s.store.GetScroll(id) +} + +func (s *RuntimeSupervisor) DataFile(id string, relativePath string) ([]byte, error) { + runtimeScroll, err := s.store.GetScroll(id) + if err != nil { + return nil, err + } + runtimeService, err := runtimebackend.NewBackend(s.runtimeBackend, s.consoleService, runtimebackend.WithKubernetesConfig(s.runtimeOptions.Kubernetes)) + if err != nil { + return nil, err + } + fileBackend, ok := runtimeService.(ports.RuntimeFileBackendInterface) + if !ok { + return nil, ErrRuntimeOperationUnsupported + } + return fileBackend.ReadDataFile(context.Background(), runtimeScroll.DataRoot, relativePath) +} + +func (s *RuntimeSupervisor) WriteDataFile(id string, relativePath string, data []byte) error { + runtimeScroll, err := s.store.GetScroll(id) + if err != nil { + return err + } + runtimeService, err := runtimebackend.NewBackend(s.runtimeBackend, s.consoleService, runtimebackend.WithKubernetesConfig(s.runtimeOptions.Kubernetes)) + 
if err != nil { + return err + } + fileBackend, ok := runtimeService.(ports.RuntimeFileBackendInterface) + if !ok { + return ErrRuntimeOperationUnsupported + } + return fileBackend.WriteDataFile(context.Background(), runtimeScroll.DataRoot, relativePath, data) +} + +func (s *RuntimeSupervisor) ScrollFile(id string) (*domain.File, error) { + session, err := s.sessionFor(id) + if err != nil { + return nil, err + } + return session.scrollService.GetFile(), nil +} + +func (s *RuntimeSupervisor) Queue(id string) (map[string]domain.ScrollLockStatus, error) { + session, err := s.sessionFor(id) + if err != nil { + return nil, err + } + return session.queueManager.GetQueue(), nil +} + +func (s *RuntimeSupervisor) Procedures(id string) (map[string]domain.ScrollLockStatus, error) { + session, err := s.sessionFor(id) + if err != nil { + return nil, err + } + return session.queueManager.GetQueue(), nil +} + func (s *RuntimeSupervisor) sessionFor(id string) (*RuntimeSession, error) { s.mu.Lock() session := s.sessions[id] @@ -236,6 +427,7 @@ func (s *RuntimeSupervisor) startSession(runtimeScroll *domain.RuntimeScroll) (* func (s *RuntimeSupervisor) markScrollError(runtimeScroll *domain.RuntimeScroll, err error) { logger.Log().Error("failed to restore runtime scroll", zap.String("scroll", runtimeScroll.ID), zap.Error(err)) runtimeScroll.Status = domain.RuntimeScrollStatusError + runtimeScroll.LastError = err.Error() if runtimeScroll.Commands == nil { runtimeScroll.Commands = map[string]domain.LockStatus{} } @@ -282,18 +474,24 @@ func NewRuntimeSession( if err != nil { return nil, err } - processLauncher, err := coreservices.NewProcedureLauncher(scrollService, runtimeService, runtimeScroll.DataRoot) - if err != nil { - return nil, err - } - queueManager := coreservices.NewQueueManager(scrollService, processLauncher) session := &RuntimeSession{ store: store, runtimeScroll: runtimeScroll, scrollService: scrollService, - queueManager: queueManager, runtimeBackend: runtimeService, } + 
processLauncher, err := coreservices.NewProcedureLauncherForRuntime(scrollService, runtimeService, runtimeScroll.DataRoot, runtimeScroll.ID, runtimeScroll.ScrollName, func() []domain.RuntimeRouteAssignment { + session.mu.Lock() + defer session.mu.Unlock() + routing := make([]domain.RuntimeRouteAssignment, len(session.runtimeScroll.Routing)) + copy(routing, session.runtimeScroll.Routing) + return routing + }) + if err != nil { + return nil, err + } + queueManager := coreservices.NewQueueManager(scrollService, processLauncher) + session.queueManager = queueManager queueManager.SetStatusObserver(session.persistCommandStatus) return session, nil } @@ -311,8 +509,39 @@ func (s *RuntimeSession) Start() { func (s *RuntimeSession) Hydrate() error { s.mu.Lock() statuses := copyCommandStatuses(s.runtimeScroll.Commands) + runtimeStatus := s.runtimeScroll.Status s.mu.Unlock() + commands := s.scrollService.GetFile().Commands if len(statuses) > 0 { + filtered := map[string]domain.LockStatus{} + removedStaleStatus := false + for commandName, status := range statuses { + command := commands[commandName] + if command == nil { + removedStaleStatus = true + continue + } + // Kubernetes keeps persistent workloads alive; do not requeue them just because + // the singleton API process restarted. 
+ if runtimeStatus == domain.RuntimeScrollStatusRunning && status.Status == domain.ScrollLockStatusDone && command.Run == domain.RunModePersistent { + continue + } + filtered[commandName] = status + } + if removedStaleStatus { + s.mu.Lock() + for commandName := range s.runtimeScroll.Commands { + if commands[commandName] == nil { + delete(s.runtimeScroll.Commands, commandName) + } + } + err := s.store.UpdateScroll(s.runtimeScroll) + s.mu.Unlock() + if err != nil { + return err + } + } + statuses = filtered if err := s.queueManager.HydrateCommandStatuses(statuses); err != nil { return err } @@ -335,6 +564,15 @@ func (s *RuntimeSession) AutoStartServe() error { if err := WriteRuntimeConfig(s.runtimeScroll, s.scrollService.GetFile(), s.runtimeBackend.Name()); err != nil { return err } + if command := s.scrollService.GetFile().Commands[serveCommand]; command != nil && command.Run == domain.RunModePersistent { + s.mu.Lock() + status, ok := s.runtimeScroll.Commands[serveCommand] + runtimeStatus := s.runtimeScroll.Status + s.mu.Unlock() + if ok && status.Status == domain.ScrollLockStatusDone && runtimeStatus == domain.RuntimeScrollStatusRunning { + return nil + } + } if err := s.queueManager.AddForcedItem(serveCommand); err != nil && !errors.Is(err, coreservices.ErrAlreadyInQueue) { return err } @@ -348,14 +586,14 @@ func (s *RuntimeSession) Run(command string) (*domain.RuntimeScroll, error) { s.refreshCommandState() targetCommand, err := s.scrollService.GetCommand(command) if err != nil { - s.markError() + s.markError(err) return nil, err } longRunning := targetCommand.Run == domain.RunModeRestart || targetCommand.Run == domain.RunModePersistent s.rememberDoneDependencies(targetCommand, map[string]bool{}) if err := s.queueManager.AddTempItem(command); err != nil { - s.markError() + s.markError(err) return nil, err } if !longRunning { @@ -378,6 +616,17 @@ func (s *RuntimeSession) refreshCommandState() { if err != nil { return } + commands := 
s.scrollService.GetFile().Commands + removedStaleStatus := false + for commandName := range fresh.Commands { + if commands[commandName] == nil { + delete(fresh.Commands, commandName) + removedStaleStatus = true + } + } + if removedStaleStatus { + _ = s.store.UpdateScroll(fresh) + } s.mu.Lock() s.runtimeScroll.Commands = copyCommandStatuses(fresh.Commands) s.runtimeScroll.Status = fresh.Status @@ -411,6 +660,125 @@ func (s *RuntimeSession) Ports() ([]domain.RuntimePortStatus, error) { return s.runtimeBackend.ExpectedPorts(runtimeScroll.DataRoot, s.scrollService.GetFile().Commands, s.scrollService.GetFile().Ports) } +func (s *RuntimeSession) RoutingTargets() ([]domain.RuntimeRoutingTarget, error) { + routingBackend, ok := s.runtimeBackend.(ports.RuntimeRoutingBackendInterface) + if !ok { + return nil, ErrRuntimeOperationUnsupported + } + s.mu.Lock() + runtimeScroll := *s.runtimeScroll + s.mu.Unlock() + return routingBackend.RoutingTargets(runtimeScroll.DataRoot, s.scrollService.GetFile().Commands, s.scrollService.GetFile().Ports) +} + +func (s *RuntimeSession) ApplyRouting(assignments []domain.RuntimeRouteAssignment) (*domain.RuntimeScroll, error) { + s.mu.Lock() + s.runtimeScroll.Routing = assignments + s.runtimeScroll.LastError = "" + err := s.store.UpdateScroll(s.runtimeScroll) + id := s.runtimeScroll.ID + s.mu.Unlock() + if err != nil { + return nil, err + } + return s.store.GetScroll(id) +} + +func (s *RuntimeSession) StopRuntime() error { + lifecycleBackend, ok := s.runtimeBackend.(ports.RuntimeLifecycleBackendInterface) + if !ok { + return ErrRuntimeOperationUnsupported + } + s.mu.Lock() + dataRoot := s.runtimeScroll.DataRoot + s.mu.Unlock() + if err := lifecycleBackend.StopRuntime(dataRoot); err != nil { + return err + } + s.mu.Lock() + s.runtimeScroll.Status = domain.RuntimeScrollStatusStopped + s.runtimeScroll.LastError = "" + err := s.store.UpdateScroll(s.runtimeScroll) + s.mu.Unlock() + return err +} + +func (s *RuntimeSession) DeleteRuntime(purgeData 
bool) error { + lifecycleBackend, ok := s.runtimeBackend.(ports.RuntimeLifecycleBackendInterface) + if !ok { + return ErrRuntimeOperationUnsupported + } + s.mu.Lock() + dataRoot := s.runtimeScroll.DataRoot + s.mu.Unlock() + return lifecycleBackend.DeleteRuntime(dataRoot, purgeData) +} + +func (s *RuntimeSession) Backup(ctx context.Context, artifact string) error { + backupBackend, ok := s.runtimeBackend.(ports.RuntimeBackupBackendInterface) + if !ok { + return ErrRuntimeOperationUnsupported + } + s.mu.Lock() + dataRoot := s.runtimeScroll.DataRoot + s.mu.Unlock() + return backupBackend.BackupRuntime(ctx, dataRoot, artifact) +} + +func (s *RuntimeSession) Restore(ctx context.Context, artifact string) error { + backupBackend, ok := s.runtimeBackend.(ports.RuntimeBackupBackendInterface) + if !ok { + return ErrRuntimeOperationUnsupported + } + s.mu.Lock() + dataRoot := s.runtimeScroll.DataRoot + s.mu.Unlock() + if err := backupBackend.RestoreRuntime(ctx, dataRoot, artifact); err != nil { + return err + } + scrollYAML, err := s.runtimeBackend.ReadScrollFile(dataRoot) + if err != nil { + return err + } + scrollService, err := coreservices.NewCachedScrollService(dataRoot, scrollYAML) + if err != nil { + return err + } + processLauncher, err := coreservices.NewProcedureLauncherForRuntime(scrollService, s.runtimeBackend, dataRoot, s.runtimeScroll.ID, s.runtimeScroll.ScrollName, func() []domain.RuntimeRouteAssignment { + s.mu.Lock() + defer s.mu.Unlock() + routing := make([]domain.RuntimeRouteAssignment, len(s.runtimeScroll.Routing)) + copy(routing, s.runtimeScroll.Routing) + return routing + }) + if err != nil { + return err + } + queueManager := coreservices.NewQueueManager(scrollService, processLauncher) + queueManager.SetStatusObserver(s.persistCommandStatus) + + s.mu.Lock() + commands := scrollService.GetFile().Commands + for commandName := range s.runtimeScroll.Commands { + if commands[commandName] == nil { + delete(s.runtimeScroll.Commands, commandName) + } + } + 
s.runtimeScroll.Artifact = artifact + s.runtimeScroll.ScrollRoot = dataRoot + s.runtimeScroll.ScrollYAML = string(scrollYAML) + s.runtimeScroll.Status = domain.RuntimeScrollStatusStopped + s.runtimeScroll.LastError = "" + s.scrollService = scrollService + s.queueManager = queueManager + if s.started { + go queueManager.Work() + } + err = s.store.UpdateScroll(s.runtimeScroll) + s.mu.Unlock() + return err +} + func (s *RuntimeSession) Shutdown() { s.queueManager.Shutdown() } @@ -418,9 +786,18 @@ func (s *RuntimeSession) Shutdown() { func (s *RuntimeSession) persistCommandStatus(command string, status domain.ScrollLockStatus, exitCode *int) { s.mu.Lock() defer s.mu.Unlock() + commands := s.scrollService.GetFile().Commands + if commands[command] == nil { + return + } if s.runtimeScroll.Commands == nil { s.runtimeScroll.Commands = map[string]domain.LockStatus{} } + for commandName := range s.runtimeScroll.Commands { + if commands[commandName] == nil { + delete(s.runtimeScroll.Commands, commandName) + } + } s.runtimeScroll.Commands[command] = domain.LockStatus{ Status: status, ExitCode: exitCode, @@ -432,10 +809,13 @@ func (s *RuntimeSession) persistCommandStatus(command string, status domain.Scro } } -func (s *RuntimeSession) markError() { +func (s *RuntimeSession) markError(err error) { s.mu.Lock() defer s.mu.Unlock() s.runtimeScroll.Status = domain.RuntimeScrollStatusError + if err != nil { + s.runtimeScroll.LastError = err.Error() + } _ = s.store.UpdateScroll(s.runtimeScroll) } diff --git a/apps/druid/core/services/runtime_controller_test.go b/apps/druid/core/services/runtime_controller_test.go index e3400158..07a764dc 100644 --- a/apps/druid/core/services/runtime_controller_test.go +++ b/apps/druid/core/services/runtime_controller_test.go @@ -105,6 +105,53 @@ func TestRuntimeSessionHydrateSkipsMissingServe(t *testing.T) { } } +func TestRuntimeSessionHydrateDropsStaleCommandStatus(t *testing.T) { + session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{ 
+ "missing": {Status: domain.ScrollLockStatusDone}, + }, cachedScrollYAML("")) + + if err := session.Hydrate(); err != nil { + t.Fatal(err) + } + + updated, err := session.store.GetScroll(session.runtimeScroll.ID) + if err != nil { + t.Fatal(err) + } + if _, ok := updated.Commands["missing"]; ok { + t.Fatalf("stale command was not removed: %#v", updated.Commands) + } +} + +func TestRuntimeSessionHydrateDoesNotRequeueRunningPersistentServe(t *testing.T) { + session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{ + "start": {Status: domain.ScrollLockStatusDone}, + }, `name: cached +desc: Cached scroll +version: 0.1.0 +app_version: "1.0" +serve: start +commands: + start: + run: persistent + procedures: + - image: alpine:3.20 + command: ["true"] +`) + session.runtimeScroll.Status = domain.RuntimeScrollStatusRunning + if err := session.store.UpdateScroll(session.runtimeScroll); err != nil { + t.Fatal(err) + } + + if err := session.Hydrate(); err != nil { + t.Fatal(err) + } + + if queue := session.queueManager.GetQueue(); len(queue) != 0 { + t.Fatalf("queue = %#v, want empty", queue) + } +} + func TestRuntimeSessionAutoStartsServeOnCreatePath(t *testing.T) { session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{}, cachedScrollYAML("start")) @@ -115,6 +162,79 @@ func TestRuntimeSessionAutoStartsServeOnCreatePath(t *testing.T) { assertQueued(t, session, "start") } +func TestRuntimeSupervisorEnsureCanCreateWithoutStarting(t *testing.T) { + scrollRoot := t.TempDir() + dataRoot := scrollRoot + if err := os.WriteFile(filepath.Join(scrollRoot, "scroll.yaml"), []byte(cachedScrollYAML("start")), 0644); err != nil { + t.Fatal(err) + } + store := coreservices.NewRuntimeStateStore(t.TempDir()) + supervisor := NewRuntimeSupervisor( + store, + coreservices.NewRuntimeScrollManager(store), + coreservices.NewConsoleManager(coreservices.NewLogManager()), + "docker", + ) + + runtimeScroll, err := supervisor.Ensure("local", "quiet-scroll", scrollRoot, dataRoot, 
false) + if err != nil { + t.Fatal(err) + } + + if runtimeScroll.Status != domain.RuntimeScrollStatusCreated { + t.Fatalf("status = %s, want created", runtimeScroll.Status) + } + if len(runtimeScroll.Commands) != 0 { + t.Fatalf("commands = %#v, want empty", runtimeScroll.Commands) + } +} + +func TestRuntimeSupervisorCreateCanCreateWithoutStarting(t *testing.T) { + root := t.TempDir() + if err := os.WriteFile(filepath.Join(root, "scroll.yaml"), []byte(cachedScrollYAML("start")), 0644); err != nil { + t.Fatal(err) + } + store := coreservices.NewRuntimeStateStore(t.TempDir()) + supervisor := NewRuntimeSupervisor( + store, + coreservices.NewRuntimeScrollManager(store), + coreservices.NewConsoleManager(coreservices.NewLogManager()), + "docker", + ) + + runtimeScroll, err := supervisor.Create("local", "quiet-create", root, root, false) + if err != nil { + t.Fatal(err) + } + + if runtimeScroll.Status != domain.RuntimeScrollStatusCreated { + t.Fatalf("status = %s, want created", runtimeScroll.Status) + } + if len(runtimeScroll.Commands) != 0 { + t.Fatalf("commands = %#v, want empty", runtimeScroll.Commands) + } +} + +func TestRuntimeSessionApplyRoutingPersistsAssignments(t *testing.T) { + session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{}, cachedScrollYAML("")) + + updated, err := session.ApplyRouting([]domain.RuntimeRouteAssignment{{ + Name: "web-http", + PortName: "http", + Host: "scroll.example.test", + PublicPort: 443, + URL: "https://scroll.example.test", + Protocol: "https", + }}) + if err != nil { + t.Fatal(err) + } + + if len(updated.Routing) != 1 || updated.Routing[0].Host != "scroll.example.test" { + t.Fatalf("routing = %#v", updated.Routing) + } +} + func TestDeriveRuntimeScrollStatusTreatsDonePersistentAsRunning(t *testing.T) { status := deriveRuntimeScrollStatus(map[string]domain.LockStatus{ "start": {Status: domain.ScrollLockStatusDone}, diff --git a/config/helm-charts/druid-cli/Chart.yaml b/config/helm-charts/druid-cli/Chart.yaml new file 
mode 100644 index 00000000..74f66fca --- /dev/null +++ b/config/helm-charts/druid-cli/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: druid-cli +description: Cluster-singleton Druid runtime API for Kubernetes-owned scroll workloads. +type: application +version: 0.1.0 +appVersion: "dev" diff --git a/config/helm-charts/druid-cli/chart_test.go b/config/helm-charts/druid-cli/chart_test.go new file mode 100644 index 00000000..843670e6 --- /dev/null +++ b/config/helm-charts/druid-cli/chart_test.go @@ -0,0 +1,83 @@ +package druidcli_test + +import ( + "os/exec" + "strings" + "testing" +) + +func TestChartRendersDefaultAndCustomValues(t *testing.T) { + if _, err := exec.LookPath("helm"); err != nil { + t.Skip("helm is not installed") + } + + defaultManifest := helmTemplate(t) + for _, want := range []string{ + "kind: Deployment", + "kind: Service", + "kind: Role", + "kind: RoleBinding", + "--runtime=kubernetes", + "--listen=:8081", + "--public-listen=:8082", + "name: management", + "name: public", + "DRUID_K8S_PULL_IMAGE", + "hubble-relay.kube-system.svc.cluster.local:80", + } { + if !strings.Contains(defaultManifest, want) { + t.Fatalf("default manifest does not contain %q", want) + } + } + if strings.Contains(defaultManifest, `resources: ["*"]`) || strings.Contains(defaultManifest, `verbs: ["*"]`) { + t.Fatal("chart rendered wildcard RBAC") + } + + customManifest := helmTemplate(t, + "--set", "auth.enabled=true", + "--set", "auth.existingSecret=druid-runtime-token", + "--set", "runtime.namespaces.mode=all", + "--set", "runtime.storageClass=local-path", + "--set", "runtime.registryPlainHTTP=true", + "--set", "runtime.pullImage=registry.local/druid-client:e2e", + "--set", "runtime.helperImage=busybox:1.36", + "--set", "runtime.kubeconfigSecret.name=druid-kubeconfig", + "--set", "hubble.relayAddr=hubble.example:80", + "--set", "networkPolicy.enabled=true", + "--set", "ingress.enabled=true", + "--set", "ingress.hosts[0].host=runtime.example.test", + "--set", 
"ingress.hosts[0].paths[0].path=/", + ) + for _, want := range []string{ + "kind: ClusterRole", + "kind: ClusterRoleBinding", + "name: DRUID_INTERNAL_TOKEN", + "name: \"druid-runtime-token\"", + "value: \"registry.local/druid-client:e2e\"", + "value: \"busybox:1.36\"", + "value: \"true\"", + "value: /etc/druid/kubeconfig", + "hubble.example:80", + "kind: NetworkPolicy", + "kind: Ingress", + "runtime.example.test", + "name: public", + "nginx.ingress.kubernetes.io/enable-cors", + "storageClassName: \"local-path\"", + } { + if !strings.Contains(customManifest, want) { + t.Fatalf("custom manifest does not contain %q", want) + } + } +} + +func helmTemplate(t *testing.T, args ...string) string { + t.Helper() + cmdArgs := append([]string{"template", "druid-cli", "."}, args...) + cmd := exec.Command("helm", cmdArgs...) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("helm template failed: %v\n%s", err, string(out)) + } + return string(out) +} diff --git a/config/helm-charts/druid-cli/templates/_helpers.tpl b/config/helm-charts/druid-cli/templates/_helpers.tpl new file mode 100644 index 00000000..04718229 --- /dev/null +++ b/config/helm-charts/druid-cli/templates/_helpers.tpl @@ -0,0 +1,37 @@ +{{- define "druid-cli.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "druid-cli.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "druid-cli.labels" -}} +helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} +app.kubernetes.io/name: {{ include "druid-cli.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{- define "druid-cli.selectorLabels" -}} +app.kubernetes.io/name: {{ include "druid-cli.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{- define "druid-cli.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} +{{- default (include "druid-cli.fullname" .) .Values.serviceAccount.name -}} +{{- else -}} +{{- default "default" .Values.serviceAccount.name -}} +{{- end -}} +{{- end -}} diff --git a/config/helm-charts/druid-cli/templates/deployment.yaml b/config/helm-charts/druid-cli/templates/deployment.yaml new file mode 100644 index 00000000..978c4fec --- /dev/null +++ b/config/helm-charts/druid-cli/templates/deployment.yaml @@ -0,0 +1,124 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "druid-cli.fullname" . }} + labels: + {{- include "druid-cli.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "druid-cli.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "druid-cli.selectorLabels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "druid-cli.serviceAccountName" . }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + securityContext: + {{- toYaml .Values.securityContext | nindent 8 }} + containers: + - name: druid-cli + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - serve + - --listen=:{{ .Values.service.managementPort }} + - --public-listen=:{{ .Values.service.publicPort }} + - --runtime={{ .Values.runtime.backend }} + - --state-dir={{ .Values.runtime.stateDir }} + ports: + - name: management + containerPort: {{ .Values.service.managementPort }} + protocol: TCP + - name: public + containerPort: {{ .Values.service.publicPort }} + protocol: TCP + env: + - name: DRUID_K8S_NAMESPACE + value: {{ default .Release.Namespace .Values.runtime.namespaces.single | quote }} + - name: DRUID_K8S_STORAGE_CLASS + value: {{ .Values.runtime.storageClass | quote }} + - name: DRUID_K8S_PULL_IMAGE + value: {{ .Values.runtime.pullImage | quote }} + - name: DRUID_K8S_HELPER_IMAGE + value: {{ .Values.runtime.helperImage | quote }} + - name: DRUID_K8S_REGISTRY_SECRET + value: {{ .Values.runtime.registrySecret | quote }} + - name: DRUID_K8S_REGISTRY_PLAIN_HTTP + value: {{ ternary "true" "false" .Values.runtime.registryPlainHTTP | quote }} + - name: DRUID_HUBBLE_RELAY_ADDR + value: {{ .Values.hubble.relayAddr | quote }} + {{- if .Values.runtime.kubeconfigSecret.name }} + - name: DRUID_K8S_KUBECONFIG + value: /etc/druid/kubeconfig + {{- end }} + {{- if and .Values.auth.enabled .Values.auth.existingSecret }} + - name: DRUID_INTERNAL_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Values.auth.existingSecret | quote }} + key: {{ .Values.auth.tokenKey | quote }} + {{- end }} + {{- with .Values.extraEnv }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{- toYaml . 
| nindent 12 }} + {{- end }} + readinessProbe: + httpGet: + path: /api/v1/health + port: management + livenessProbe: + httpGet: + path: /api/v1/health + port: management + volumeMounts: + - name: runtime-state + mountPath: {{ .Values.runtime.stateDir }} + {{- if .Values.runtime.kubeconfigSecret.name }} + - name: kubeconfig + mountPath: /etc/druid/kubeconfig + subPath: {{ .Values.runtime.kubeconfigSecret.key }} + readOnly: true + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumes: + - name: runtime-state + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ include "druid-cli.fullname" . }}-state + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.runtime.kubeconfigSecret.name }} + - name: kubeconfig + secret: + secretName: {{ .Values.runtime.kubeconfigSecret.name | quote }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/config/helm-charts/druid-cli/templates/ingress.yaml b/config/helm-charts/druid-cli/templates/ingress.yaml new file mode 100644 index 00000000..93d995dc --- /dev/null +++ b/config/helm-charts/druid-cli/templates/ingress.yaml @@ -0,0 +1,35 @@ +{{- if .Values.ingress.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "druid-cli.fullname" . }} + labels: + {{- include "druid-cli.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- with .Values.ingress.className }} + ingressClassName: {{ . | quote }} + {{- end }} + {{- with .Values.ingress.tls }} + tls: + {{- toYaml . 
| nindent 4 }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ default "/" .path | quote }} + pathType: {{ default "Prefix" .pathType }} + backend: + service: + name: {{ include "druid-cli.fullname" $ }} + port: + name: public + {{- end }} + {{- end }} +{{- end }} diff --git a/config/helm-charts/druid-cli/templates/networkpolicy.yaml b/config/helm-charts/druid-cli/templates/networkpolicy.yaml new file mode 100644 index 00000000..e426f0f7 --- /dev/null +++ b/config/helm-charts/druid-cli/templates/networkpolicy.yaml @@ -0,0 +1,25 @@ +{{- if .Values.networkPolicy.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "druid-cli.fullname" . }} + labels: + {{- include "druid-cli.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "druid-cli.selectorLabels" . | nindent 6 }} + policyTypes: + - Ingress + ingress: + - from: + - namespaceSelector: + {{- toYaml .Values.networkPolicy.ingress.namespaceSelector | nindent 12 }} + podSelector: + {{- toYaml .Values.networkPolicy.ingress.podSelector | nindent 12 }} + ports: + - protocol: TCP + port: {{ .Values.service.managementPort }} + - protocol: TCP + port: {{ .Values.service.publicPort }} +{{- end }} diff --git a/config/helm-charts/druid-cli/templates/pvc.yaml b/config/helm-charts/druid-cli/templates/pvc.yaml new file mode 100644 index 00000000..29524e42 --- /dev/null +++ b/config/helm-charts/druid-cli/templates/pvc.yaml @@ -0,0 +1,18 @@ +{{- if .Values.persistence.enabled -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "druid-cli.fullname" . }}-state + labels: + {{- include "druid-cli.labels" . 
| nindent 4 }} +spec: + accessModes: + - ReadWriteOnce + {{- $storageClass := default .Values.runtime.storageClass .Values.persistence.storageClass }} + {{- if $storageClass }} + storageClassName: {{ $storageClass | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- end }} diff --git a/config/helm-charts/druid-cli/templates/rbac.yaml b/config/helm-charts/druid-cli/templates/rbac.yaml new file mode 100644 index 00000000..30b5cf2a --- /dev/null +++ b/config/helm-charts/druid-cli/templates/rbac.yaml @@ -0,0 +1,41 @@ +{{- $clusterWide := eq .Values.runtime.namespaces.mode "all" -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: {{ ternary "ClusterRole" "Role" $clusterWide }} +metadata: + name: {{ include "druid-cli.fullname" . }} + labels: + {{- include "druid-cli.labels" . | nindent 4 }} +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims", "services", "pods"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: [""] + resources: ["pods/log"] + verbs: ["get", "list", "watch"] + - apiGroups: ["apps"] + resources: ["statefulsets"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["discovery.k8s.io"] + resources: ["endpointslices"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: {{ ternary "ClusterRoleBinding" "RoleBinding" $clusterWide }} +metadata: + name: {{ include "druid-cli.fullname" . }} + labels: + {{- include "druid-cli.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: {{ ternary "ClusterRole" "Role" $clusterWide }} + name: {{ include "druid-cli.fullname" . 
}} +subjects: + - kind: ServiceAccount + name: {{ include "druid-cli.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} diff --git a/config/helm-charts/druid-cli/templates/service.yaml b/config/helm-charts/druid-cli/templates/service.yaml new file mode 100644 index 00000000..216056a3 --- /dev/null +++ b/config/helm-charts/druid-cli/templates/service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "druid-cli.fullname" . }} + labels: + {{- include "druid-cli.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.managementPort }} + targetPort: management + protocol: TCP + name: management + - port: {{ .Values.service.publicPort }} + targetPort: public + protocol: TCP + name: public + selector: + {{- include "druid-cli.selectorLabels" . | nindent 4 }} diff --git a/config/helm-charts/druid-cli/templates/serviceaccount.yaml b/config/helm-charts/druid-cli/templates/serviceaccount.yaml new file mode 100644 index 00000000..a93a23ee --- /dev/null +++ b/config/helm-charts/druid-cli/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "druid-cli.serviceAccountName" . }} + labels: + {{- include "druid-cli.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/config/helm-charts/druid-cli/values.yaml b/config/helm-charts/druid-cli/values.yaml new file mode 100644 index 00000000..cb0af8eb --- /dev/null +++ b/config/helm-charts/druid-cli/values.yaml @@ -0,0 +1,89 @@ +replicaCount: 1 + +image: + repository: ghcr.io/highcard-dev/druid + tag: dev + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + create: true + annotations: {} + name: "" + +podAnnotations: {} +podLabels: {} + +securityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 + fsGroup: 65532 + +containerSecurityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + +service: + type: ClusterIP + managementPort: 8081 + publicPort: 8082 + +ingress: + enabled: false + className: nginx + annotations: + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/cors-allow-methods: "GET, PUT, POST, DELETE, PATCH, OPTIONS, PROPFIND, MOVE, MKCOL, COPY" + nginx.ingress.kubernetes.io/cors-allow-headers: "DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization,depth,destination,overwrite,if,lock-token,timeout,dav" + nginx.ingress.kubernetes.io/cors-expose-headers: "Druid-Version" + hosts: [] + tls: [] + +runtime: + backend: kubernetes + stateDir: /var/lib/druid/runtime + namespaces: + mode: single + single: "" + storageClass: "" + pullImage: ghcr.io/highcard-dev/druid-client:dev + helperImage: alpine:3.20 + registrySecret: "" + registryPlainHTTP: false + kubeconfigSecret: + name: "" + key: kubeconfig + +hubble: + relayAddr: hubble-relay.kube-system.svc.cluster.local:80 + +auth: + enabled: false + existingSecret: "" + tokenKey: token + +resources: {} + +nodeSelector: {} +tolerations: [] +affinity: {} + +persistence: + enabled: true + storageClass: "" + size: 1Gi + +networkPolicy: + enabled: false + ingress: + 
namespaceSelector: {} + podSelector: {} + +extraEnv: [] diff --git a/internal/api/generated.go b/internal/api/generated.go index 30235cbb..0d1c542a 100644 --- a/internal/api/generated.go +++ b/internal/api/generated.go @@ -31,6 +31,14 @@ const ( Stopped RuntimeScrollStatus = "stopped" ) +// ApplyRoutingRequest defines model for ApplyRoutingRequest. +type ApplyRoutingRequest struct { + Assignments []RuntimeRouteAssignment `json:"assignments"` +} + +// CommandStatusMap defines model for CommandStatusMap. +type CommandStatusMap map[string]interface{} + // CreateScrollRequest defines model for CreateScrollRequest. type CreateScrollRequest struct { // Artifact OCI artifact reference or local scroll path @@ -47,6 +55,7 @@ type CreateScrollRequest struct { // ScrollRoot Optional daemon-local path or backend ref containing scroll.yaml and scroll spec files. If omitted, a materializing runtime backend may pull the artifact. ScrollRoot *string `json:"scroll_root,omitempty"` + Start *bool `json:"start,omitempty"` } // DeletedScroll defines model for DeletedScroll. @@ -55,6 +64,16 @@ type DeletedScroll struct { Status string `json:"status"` } +// EnsureScrollRequest defines model for EnsureScrollRequest. +type EnsureScrollRequest struct { + Artifact string `json:"artifact"` + DataRoot *string `json:"data_root,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + ScrollRoot *string `json:"scroll_root,omitempty"` + Start *bool `json:"start,omitempty"` +} + // HealthResponse defines model for HealthResponse. type HealthResponse struct { // Mode Current health status mode @@ -67,6 +86,12 @@ type HealthResponse struct { StartDate *time.Time `json:"start_date"` } +// RuntimeArtifactOperationRequest defines model for RuntimeArtifactOperationRequest. +type RuntimeArtifactOperationRequest struct { + Artifact string `json:"artifact"` + Restart *bool `json:"restart,omitempty"` +} + // RuntimePortStatus defines model for RuntimePortStatus. 
type RuntimePortStatus struct { Bound bool `json:"bound"` @@ -87,26 +112,67 @@ type RuntimePortStatus struct { TxBytes *int64 `json:"tx_bytes,omitempty"` } +// RuntimeRouteAssignment defines model for RuntimeRouteAssignment. +type RuntimeRouteAssignment struct { + ExternalIp *string `json:"external_ip,omitempty"` + Host *string `json:"host,omitempty"` + Name *string `json:"name,omitempty"` + PortName *string `json:"port_name,omitempty"` + Protocol *string `json:"protocol,omitempty"` + PublicPort *int `json:"public_port,omitempty"` + Url *string `json:"url,omitempty"` +} + +// RuntimeRoutingTarget defines model for RuntimeRoutingTarget. +type RuntimeRoutingTarget struct { + Name string `json:"name"` + Namespace *string `json:"namespace,omitempty"` + Port int `json:"port"` + PortName string `json:"port_name"` + Procedure string `json:"procedure"` + Protocol string `json:"protocol"` + Selector *map[string]string `json:"selector,omitempty"` + ServiceName string `json:"service_name"` + ServicePort int `json:"service_port"` +} + // RuntimeScroll defines model for RuntimeScroll. 
type RuntimeScroll struct { - Artifact string `json:"artifact"` - Commands *map[string]interface{} `json:"commands,omitempty"` - CreatedAt time.Time `json:"created_at"` - DataRoot string `json:"data_root"` - Id string `json:"id"` - OwnerId *string `json:"owner_id,omitempty"` - ScrollName string `json:"scroll_name"` - ScrollRoot string `json:"scroll_root"` - Status RuntimeScrollStatus `json:"status"` - UpdatedAt time.Time `json:"updated_at"` + Artifact string `json:"artifact"` + Commands *map[string]interface{} `json:"commands,omitempty"` + CreatedAt time.Time `json:"created_at"` + DataRoot string `json:"data_root"` + Id string `json:"id"` + LastError *string `json:"last_error,omitempty"` + OwnerId *string `json:"owner_id,omitempty"` + Routing *[]RuntimeRouteAssignment `json:"routing,omitempty"` + ScrollName string `json:"scroll_name"` + ScrollRoot string `json:"scroll_root"` + Status RuntimeScrollStatus `json:"status"` + UpdatedAt time.Time `json:"updated_at"` } // RuntimeScrollStatus defines model for RuntimeScroll.Status. type RuntimeScrollStatus string +// ScrollLogMap defines model for ScrollLogMap. +type ScrollLogMap map[string][]string + // CreateScrollJSONRequestBody defines body for CreateScroll for application/json ContentType. type CreateScrollJSONRequestBody = CreateScrollRequest +// EnsureScrollJSONRequestBody defines body for EnsureScroll for application/json ContentType. +type EnsureScrollJSONRequestBody = EnsureScrollRequest + +// BackupScrollJSONRequestBody defines body for BackupScroll for application/json ContentType. +type BackupScrollJSONRequestBody = RuntimeArtifactOperationRequest + +// RestoreScrollJSONRequestBody defines body for RestoreScroll for application/json ContentType. +type RestoreScrollJSONRequestBody = RuntimeArtifactOperationRequest + +// ApplyScrollRoutingJSONRequestBody defines body for ApplyScrollRouting for application/json ContentType. 
+type ApplyScrollRoutingJSONRequestBody = ApplyRoutingRequest + // RequestEditorFn is the function signature for the RequestEditor callback function type RequestEditorFn func(ctx context.Context, req *http.Request) error @@ -191,17 +257,61 @@ type ClientInterface interface { CreateScroll(ctx context.Context, body CreateScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) + // EnsureScrollWithBody request with any body + EnsureScrollWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + + EnsureScroll(ctx context.Context, body EnsureScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) + // DeleteScroll request DeleteScroll(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) // GetScroll request GetScroll(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) + // BackupScrollWithBody request with any body + BackupScrollWithBody(ctx context.Context, id string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + + BackupScroll(ctx context.Context, id string, body BackupScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) + // RunScrollCommand request RunScrollCommand(ctx context.Context, id string, command string, reqEditors ...RequestEditorFn) (*http.Response, error) + // GetScrollConfig request + GetScrollConfig(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) + + // GetScrollConsoles request + GetScrollConsoles(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) + + // GetScrollLogs request + GetScrollLogs(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) + // GetScrollPorts request GetScrollPorts(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) + + // GetScrollProcedures 
request + GetScrollProcedures(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) + + // GetScrollQueue request + GetScrollQueue(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) + + // RestoreScrollWithBody request with any body + RestoreScrollWithBody(ctx context.Context, id string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + + RestoreScroll(ctx context.Context, id string, body RestoreScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) + + // ApplyScrollRoutingWithBody request with any body + ApplyScrollRoutingWithBody(ctx context.Context, id string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + + ApplyScrollRouting(ctx context.Context, id string, body ApplyScrollRoutingJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) + + // GetScrollRoutingTargets request + GetScrollRoutingTargets(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) + + // StartScroll request + StartScroll(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) + + // StopScroll request + StopScroll(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) } func (c *Client) GetHealthAuth(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) { @@ -252,6 +362,30 @@ func (c *Client) CreateScroll(ctx context.Context, body CreateScrollJSONRequestB return c.Client.Do(req) } +func (c *Client) EnsureScrollWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewEnsureScrollRequestWithBody(c.Server, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) 
+} + +func (c *Client) EnsureScroll(ctx context.Context, body EnsureScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewEnsureScrollRequest(c.Server, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + func (c *Client) DeleteScroll(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) { req, err := NewDeleteScrollRequest(c.Server, id) if err != nil { @@ -276,6 +410,30 @@ func (c *Client) GetScroll(ctx context.Context, id string, reqEditors ...Request return c.Client.Do(req) } +func (c *Client) BackupScrollWithBody(ctx context.Context, id string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewBackupScrollRequestWithBody(c.Server, id, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) BackupScroll(ctx context.Context, id string, body BackupScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewBackupScrollRequest(c.Server, id, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + func (c *Client) RunScrollCommand(ctx context.Context, id string, command string, reqEditors ...RequestEditorFn) (*http.Response, error) { req, err := NewRunScrollCommandRequest(c.Server, id, command) if err != nil { @@ -288,6 +446,42 @@ func (c *Client) RunScrollCommand(ctx context.Context, id string, command string return c.Client.Do(req) } +func (c *Client) GetScrollConfig(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err 
:= NewGetScrollConfigRequest(c.Server, id) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) GetScrollConsoles(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGetScrollConsolesRequest(c.Server, id) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) GetScrollLogs(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGetScrollLogsRequest(c.Server, id) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + func (c *Client) GetScrollPorts(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) { req, err := NewGetScrollPortsRequest(c.Server, id) if err != nil { @@ -300,6 +494,114 @@ func (c *Client) GetScrollPorts(ctx context.Context, id string, reqEditors ...Re return c.Client.Do(req) } +func (c *Client) GetScrollProcedures(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGetScrollProceduresRequest(c.Server, id) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) GetScrollQueue(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGetScrollQueueRequest(c.Server, id) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + 
+func (c *Client) RestoreScrollWithBody(ctx context.Context, id string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewRestoreScrollRequestWithBody(c.Server, id, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) RestoreScroll(ctx context.Context, id string, body RestoreScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewRestoreScrollRequest(c.Server, id, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) ApplyScrollRoutingWithBody(ctx context.Context, id string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewApplyScrollRoutingRequestWithBody(c.Server, id, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) ApplyScrollRouting(ctx context.Context, id string, body ApplyScrollRoutingJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewApplyScrollRoutingRequest(c.Server, id, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) GetScrollRoutingTargets(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGetScrollRoutingTargetsRequest(c.Server, id) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != 
nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) StartScroll(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewStartScrollRequest(c.Server, id) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) StopScroll(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewStopScrollRequest(c.Server, id) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + // NewGetHealthAuthRequest generates requests for GetHealthAuth func NewGetHealthAuthRequest(server string) (*http.Request, error) { var err error @@ -394,23 +696,27 @@ func NewCreateScrollRequestWithBody(server string, contentType string, body io.R return req, nil } -// NewDeleteScrollRequest generates requests for DeleteScroll -func NewDeleteScrollRequest(server string, id string) (*http.Request, error) { - var err error - - var pathParam0 string - - pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) +// NewEnsureScrollRequest calls the generic EnsureScroll builder with application/json body +func NewEnsureScrollRequest(server string, body EnsureScrollJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) if err != nil { return nil, err } + bodyReader = bytes.NewReader(buf) + return NewEnsureScrollRequestWithBody(server, "application/json", bodyReader) +} + +// NewEnsureScrollRequestWithBody generates requests for EnsureScroll with any type of body +func NewEnsureScrollRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { + var err error serverURL, err := url.Parse(server) if 
err != nil { return nil, err } - operationPath := fmt.Sprintf("/api/v1/scrolls/%s", pathParam0) + operationPath := fmt.Sprintf("/api/v1/scrolls/ensure") if operationPath[0] == '/' { operationPath = "." + operationPath } @@ -420,16 +726,18 @@ func NewDeleteScrollRequest(server string, id string) (*http.Request, error) { return nil, err } - req, err := http.NewRequest("DELETE", queryURL.String(), nil) + req, err := http.NewRequest("POST", queryURL.String(), body) if err != nil { return nil, err } + req.Header.Add("Content-Type", contentType) + return req, nil } -// NewGetScrollRequest generates requests for GetScroll -func NewGetScrollRequest(server string, id string) (*http.Request, error) { +// NewDeleteScrollRequest generates requests for DeleteScroll +func NewDeleteScrollRequest(server string, id string) (*http.Request, error) { var err error var pathParam0 string @@ -454,7 +762,7 @@ func NewGetScrollRequest(server string, id string) (*http.Request, error) { return nil, err } - req, err := http.NewRequest("GET", queryURL.String(), nil) + req, err := http.NewRequest("DELETE", queryURL.String(), nil) if err != nil { return nil, err } @@ -462,8 +770,8 @@ func NewGetScrollRequest(server string, id string) (*http.Request, error) { return req, nil } -// NewRunScrollCommandRequest generates requests for RunScrollCommand -func NewRunScrollCommandRequest(server string, id string, command string) (*http.Request, error) { +// NewGetScrollRequest generates requests for GetScroll +func NewGetScrollRequest(server string, id string) (*http.Request, error) { var err error var pathParam0 string @@ -473,19 +781,12 @@ func NewRunScrollCommandRequest(server string, id string, command string) (*http return nil, err } - var pathParam1 string - - pathParam1, err = runtime.StyleParamWithLocation("simple", false, "command", runtime.ParamLocationPath, command) - if err != nil { - return nil, err - } - serverURL, err := url.Parse(server) if err != nil { return nil, err } - operationPath := 
fmt.Sprintf("/api/v1/scrolls/%s/commands/%s", pathParam0, pathParam1) + operationPath := fmt.Sprintf("/api/v1/scrolls/%s", pathParam0) if operationPath[0] == '/' { operationPath = "." + operationPath } @@ -495,7 +796,7 @@ func NewRunScrollCommandRequest(server string, id string, command string) (*http return nil, err } - req, err := http.NewRequest("POST", queryURL.String(), nil) + req, err := http.NewRequest("GET", queryURL.String(), nil) if err != nil { return nil, err } @@ -503,8 +804,19 @@ func NewRunScrollCommandRequest(server string, id string, command string) (*http return req, nil } -// NewGetScrollPortsRequest generates requests for GetScrollPorts -func NewGetScrollPortsRequest(server string, id string) (*http.Request, error) { +// NewBackupScrollRequest calls the generic BackupScroll builder with application/json body +func NewBackupScrollRequest(server string, id string, body BackupScrollJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewBackupScrollRequestWithBody(server, id, "application/json", bodyReader) +} + +// NewBackupScrollRequestWithBody generates requests for BackupScroll with any type of body +func NewBackupScrollRequestWithBody(server string, id string, contentType string, body io.Reader) (*http.Request, error) { var err error var pathParam0 string @@ -519,7 +831,7 @@ func NewGetScrollPortsRequest(server string, id string) (*http.Request, error) { return nil, err } - operationPath := fmt.Sprintf("/api/v1/scrolls/%s/ports", pathParam0) + operationPath := fmt.Sprintf("/api/v1/scrolls/%s/backup", pathParam0) if operationPath[0] == '/' { operationPath = "." 
+ operationPath } @@ -529,252 +841,1003 @@ func NewGetScrollPortsRequest(server string, id string) (*http.Request, error) { return nil, err } - req, err := http.NewRequest("GET", queryURL.String(), nil) + req, err := http.NewRequest("POST", queryURL.String(), body) if err != nil { return nil, err } + req.Header.Add("Content-Type", contentType) + return req, nil } -func (c *Client) applyEditors(ctx context.Context, req *http.Request, additionalEditors []RequestEditorFn) error { - for _, r := range c.RequestEditors { - if err := r(ctx, req); err != nil { - return err - } - } - for _, r := range additionalEditors { - if err := r(ctx, req); err != nil { - return err - } - } - return nil -} +// NewRunScrollCommandRequest generates requests for RunScrollCommand +func NewRunScrollCommandRequest(server string, id string, command string) (*http.Request, error) { + var err error -// ClientWithResponses builds on ClientInterface to offer response payloads -type ClientWithResponses struct { - ClientInterface -} + var pathParam0 string -// NewClientWithResponses creates a new ClientWithResponses, which wraps -// Client with return type handling -func NewClientWithResponses(server string, opts ...ClientOption) (*ClientWithResponses, error) { - client, err := NewClient(server, opts...) + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) if err != nil { return nil, err } - return &ClientWithResponses{client}, nil -} - -// WithBaseURL overrides the baseURL. -func WithBaseURL(baseURL string) ClientOption { - return func(c *Client) error { - newBaseURL, err := url.Parse(baseURL) - if err != nil { - return err - } - c.Server = newBaseURL.String() - return nil - } -} -// ClientWithResponsesInterface is the interface specification for the client with responses above. 
-type ClientWithResponsesInterface interface { - // GetHealthAuthWithResponse request - GetHealthAuthWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*GetHealthAuthResponse, error) - - // ListScrollsWithResponse request - ListScrollsWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*ListScrollsResponse, error) + var pathParam1 string - // CreateScrollWithBodyWithResponse request with any body - CreateScrollWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*CreateScrollResponse, error) + pathParam1, err = runtime.StyleParamWithLocation("simple", false, "command", runtime.ParamLocationPath, command) + if err != nil { + return nil, err + } - CreateScrollWithResponse(ctx context.Context, body CreateScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*CreateScrollResponse, error) + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } - // DeleteScrollWithResponse request - DeleteScrollWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*DeleteScrollResponse, error) + operationPath := fmt.Sprintf("/api/v1/scrolls/%s/commands/%s", pathParam0, pathParam1) + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } - // GetScrollWithResponse request - GetScrollWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollResponse, error) + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } - // RunScrollCommandWithResponse request - RunScrollCommandWithResponse(ctx context.Context, id string, command string, reqEditors ...RequestEditorFn) (*RunScrollCommandResponse, error) + req, err := http.NewRequest("POST", queryURL.String(), nil) + if err != nil { + return nil, err + } - // GetScrollPortsWithResponse request - GetScrollPortsWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollPortsResponse, error) + return req, nil } -type GetHealthAuthResponse struct { - Body []byte - HTTPResponse *http.Response - JSON200 *HealthResponse - JSON503 *HealthResponse -} +// NewGetScrollConfigRequest generates requests for GetScrollConfig +func NewGetScrollConfigRequest(server string, id string) (*http.Request, error) { + var err error -// Status returns HTTPResponse.Status -func (r GetHealthAuthResponse) Status() string { - if r.HTTPResponse != nil { - return r.HTTPResponse.Status - } - return http.StatusText(0) -} + var pathParam0 string -// StatusCode returns HTTPResponse.StatusCode -func (r GetHealthAuthResponse) StatusCode() int { - if r.HTTPResponse != nil { - return r.HTTPResponse.StatusCode + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) + if err != nil { + return nil, err } - return 0 -} -type ListScrollsResponse struct { - Body []byte - HTTPResponse *http.Response - JSON200 *[]RuntimeScroll -} - -// Status returns HTTPResponse.Status -func (r ListScrollsResponse) Status() string { - if r.HTTPResponse != nil { - return r.HTTPResponse.Status + serverURL, err := url.Parse(server) + if err != nil { + return nil, err } - return http.StatusText(0) -} -// StatusCode returns HTTPResponse.StatusCode -func (r 
ListScrollsResponse) StatusCode() int { - if r.HTTPResponse != nil { - return r.HTTPResponse.StatusCode + operationPath := fmt.Sprintf("/api/v1/scrolls/%s/config", pathParam0) + if operationPath[0] == '/' { + operationPath = "." + operationPath } - return 0 -} - -type CreateScrollResponse struct { - Body []byte - HTTPResponse *http.Response - JSON201 *RuntimeScroll -} -// Status returns HTTPResponse.Status -func (r CreateScrollResponse) Status() string { - if r.HTTPResponse != nil { - return r.HTTPResponse.Status + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err } - return http.StatusText(0) -} -// StatusCode returns HTTPResponse.StatusCode -func (r CreateScrollResponse) StatusCode() int { - if r.HTTPResponse != nil { - return r.HTTPResponse.StatusCode + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err } - return 0 -} -type DeleteScrollResponse struct { - Body []byte - HTTPResponse *http.Response - JSON200 *DeletedScroll + return req, nil } -// Status returns HTTPResponse.Status -func (r DeleteScrollResponse) Status() string { - if r.HTTPResponse != nil { - return r.HTTPResponse.Status +// NewGetScrollConsolesRequest generates requests for GetScrollConsoles +func NewGetScrollConsolesRequest(server string, id string) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) + if err != nil { + return nil, err } - return http.StatusText(0) -} -// StatusCode returns HTTPResponse.StatusCode -func (r DeleteScrollResponse) StatusCode() int { - if r.HTTPResponse != nil { - return r.HTTPResponse.StatusCode + serverURL, err := url.Parse(server) + if err != nil { + return nil, err } - return 0 -} -type GetScrollResponse struct { - Body []byte - HTTPResponse *http.Response - JSON200 *RuntimeScroll -} + operationPath := fmt.Sprintf("/api/v1/scrolls/%s/consoles", pathParam0) + if 
operationPath[0] == '/' { + operationPath = "." + operationPath + } -// Status returns HTTPResponse.Status -func (r GetScrollResponse) Status() string { - if r.HTTPResponse != nil { - return r.HTTPResponse.Status + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err } - return http.StatusText(0) -} -// StatusCode returns HTTPResponse.StatusCode -func (r GetScrollResponse) StatusCode() int { - if r.HTTPResponse != nil { - return r.HTTPResponse.StatusCode + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err } - return 0 -} -type RunScrollCommandResponse struct { - Body []byte - HTTPResponse *http.Response - JSON200 *RuntimeScroll + return req, nil } -// Status returns HTTPResponse.Status -func (r RunScrollCommandResponse) Status() string { - if r.HTTPResponse != nil { - return r.HTTPResponse.Status - } - return http.StatusText(0) -} +// NewGetScrollLogsRequest generates requests for GetScrollLogs +func NewGetScrollLogsRequest(server string, id string) (*http.Request, error) { + var err error -// StatusCode returns HTTPResponse.StatusCode -func (r RunScrollCommandResponse) StatusCode() int { - if r.HTTPResponse != nil { - return r.HTTPResponse.StatusCode - } - return 0 -} + var pathParam0 string -type GetScrollPortsResponse struct { - Body []byte - HTTPResponse *http.Response - JSON200 *[]RuntimePortStatus -} + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) + if err != nil { + return nil, err + } -// Status returns HTTPResponse.Status -func (r GetScrollPortsResponse) Status() string { - if r.HTTPResponse != nil { - return r.HTTPResponse.Status + serverURL, err := url.Parse(server) + if err != nil { + return nil, err } - return http.StatusText(0) -} -// StatusCode returns HTTPResponse.StatusCode -func (r GetScrollPortsResponse) StatusCode() int { - if r.HTTPResponse != nil { - return r.HTTPResponse.StatusCode + operationPath := 
fmt.Sprintf("/api/v1/scrolls/%s/logs", pathParam0) + if operationPath[0] == '/' { + operationPath = "." + operationPath } - return 0 -} -// GetHealthAuthWithResponse request returning *GetHealthAuthResponse -func (c *ClientWithResponses) GetHealthAuthWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*GetHealthAuthResponse, error) { - rsp, err := c.GetHealthAuth(ctx, reqEditors...) + queryURL, err := serverURL.Parse(operationPath) if err != nil { return nil, err } - return ParseGetHealthAuthResponse(rsp) -} -// ListScrollsWithResponse request returning *ListScrollsResponse -func (c *ClientWithResponses) ListScrollsWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*ListScrollsResponse, error) { - rsp, err := c.ListScrolls(ctx, reqEditors...) + req, err := http.NewRequest("GET", queryURL.String(), nil) if err != nil { return nil, err } - return ParseListScrollsResponse(rsp) + + return req, nil +} + +// NewGetScrollPortsRequest generates requests for GetScrollPorts +func NewGetScrollPortsRequest(server string, id string) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/scrolls/%s/ports", pathParam0) + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewGetScrollProceduresRequest generates requests for GetScrollProcedures +func NewGetScrollProceduresRequest(server string, id string) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/scrolls/%s/procedures", pathParam0) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewGetScrollQueueRequest generates requests for GetScrollQueue +func NewGetScrollQueueRequest(server string, id string) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/scrolls/%s/queue", pathParam0) + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewRestoreScrollRequest calls the generic RestoreScroll builder with application/json body +func NewRestoreScrollRequest(server string, id string, body RestoreScrollJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewRestoreScrollRequestWithBody(server, id, "application/json", bodyReader) +} + +// NewRestoreScrollRequestWithBody generates requests for RestoreScroll with any type of body +func NewRestoreScrollRequestWithBody(server string, id string, contentType string, body io.Reader) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/scrolls/%s/restore", pathParam0) + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + +// NewApplyScrollRoutingRequest calls the generic ApplyScrollRouting builder with application/json body +func NewApplyScrollRoutingRequest(server string, id string, body ApplyScrollRoutingJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewApplyScrollRoutingRequestWithBody(server, id, "application/json", bodyReader) +} + +// NewApplyScrollRoutingRequestWithBody generates requests for ApplyScrollRouting with any type of body +func NewApplyScrollRoutingRequestWithBody(server string, id string, contentType string, body io.Reader) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/scrolls/%s/routing", pathParam0) + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + +// NewGetScrollRoutingTargetsRequest generates requests for GetScrollRoutingTargets +func NewGetScrollRoutingTargetsRequest(server string, id string) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/scrolls/%s/routing/targets", pathParam0) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewStartScrollRequest generates requests for StartScroll +func NewStartScrollRequest(server string, id string) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/scrolls/%s/start", pathParam0) + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewStopScrollRequest generates requests for StopScroll +func NewStopScrollRequest(server string, id string) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/scrolls/%s/stop", pathParam0) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +func (c *Client) applyEditors(ctx context.Context, req *http.Request, additionalEditors []RequestEditorFn) error { + for _, r := range c.RequestEditors { + if err := r(ctx, req); err != nil { + return err + } + } + for _, r := range additionalEditors { + if err := r(ctx, req); err != nil { + return err + } + } + return nil +} + +// ClientWithResponses builds on ClientInterface to offer response payloads +type ClientWithResponses struct { + ClientInterface +} + +// NewClientWithResponses creates a new ClientWithResponses, which wraps +// Client with return type handling +func NewClientWithResponses(server string, opts ...ClientOption) (*ClientWithResponses, error) { + client, err := NewClient(server, opts...) + if err != nil { + return nil, err + } + return &ClientWithResponses{client}, nil +} + +// WithBaseURL overrides the baseURL. 
+func WithBaseURL(baseURL string) ClientOption { + return func(c *Client) error { + newBaseURL, err := url.Parse(baseURL) + if err != nil { + return err + } + c.Server = newBaseURL.String() + return nil + } +} + +// ClientWithResponsesInterface is the interface specification for the client with responses above. +type ClientWithResponsesInterface interface { + // GetHealthAuthWithResponse request + GetHealthAuthWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*GetHealthAuthResponse, error) + + // ListScrollsWithResponse request + ListScrollsWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*ListScrollsResponse, error) + + // CreateScrollWithBodyWithResponse request with any body + CreateScrollWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*CreateScrollResponse, error) + + CreateScrollWithResponse(ctx context.Context, body CreateScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*CreateScrollResponse, error) + + // EnsureScrollWithBodyWithResponse request with any body + EnsureScrollWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*EnsureScrollResponse, error) + + EnsureScrollWithResponse(ctx context.Context, body EnsureScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*EnsureScrollResponse, error) + + // DeleteScrollWithResponse request + DeleteScrollWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*DeleteScrollResponse, error) + + // GetScrollWithResponse request + GetScrollWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollResponse, error) + + // BackupScrollWithBodyWithResponse request with any body + BackupScrollWithBodyWithResponse(ctx context.Context, id string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*BackupScrollResponse, error) + + BackupScrollWithResponse(ctx context.Context, id string, body 
BackupScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*BackupScrollResponse, error) + + // RunScrollCommandWithResponse request + RunScrollCommandWithResponse(ctx context.Context, id string, command string, reqEditors ...RequestEditorFn) (*RunScrollCommandResponse, error) + + // GetScrollConfigWithResponse request + GetScrollConfigWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollConfigResponse, error) + + // GetScrollConsolesWithResponse request + GetScrollConsolesWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollConsolesResponse, error) + + // GetScrollLogsWithResponse request + GetScrollLogsWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollLogsResponse, error) + + // GetScrollPortsWithResponse request + GetScrollPortsWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollPortsResponse, error) + + // GetScrollProceduresWithResponse request + GetScrollProceduresWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollProceduresResponse, error) + + // GetScrollQueueWithResponse request + GetScrollQueueWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollQueueResponse, error) + + // RestoreScrollWithBodyWithResponse request with any body + RestoreScrollWithBodyWithResponse(ctx context.Context, id string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*RestoreScrollResponse, error) + + RestoreScrollWithResponse(ctx context.Context, id string, body RestoreScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*RestoreScrollResponse, error) + + // ApplyScrollRoutingWithBodyWithResponse request with any body + ApplyScrollRoutingWithBodyWithResponse(ctx context.Context, id string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*ApplyScrollRoutingResponse, error) + + ApplyScrollRoutingWithResponse(ctx context.Context, id 
string, body ApplyScrollRoutingJSONRequestBody, reqEditors ...RequestEditorFn) (*ApplyScrollRoutingResponse, error) + + // GetScrollRoutingTargetsWithResponse request + GetScrollRoutingTargetsWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollRoutingTargetsResponse, error) + + // StartScrollWithResponse request + StartScrollWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*StartScrollResponse, error) + + // StopScrollWithResponse request + StopScrollWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*StopScrollResponse, error) +} + +type GetHealthAuthResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *HealthResponse + JSON503 *HealthResponse +} + +// Status returns HTTPResponse.Status +func (r GetHealthAuthResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetHealthAuthResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type ListScrollsResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *[]RuntimeScroll +} + +// Status returns HTTPResponse.Status +func (r ListScrollsResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r ListScrollsResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type CreateScrollResponse struct { + Body []byte + HTTPResponse *http.Response + JSON201 *RuntimeScroll +} + +// Status returns HTTPResponse.Status +func (r CreateScrollResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r CreateScrollResponse) StatusCode() int 
{ + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type EnsureScrollResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *RuntimeScroll +} + +// Status returns HTTPResponse.Status +func (r EnsureScrollResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r EnsureScrollResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type DeleteScrollResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *DeletedScroll +} + +// Status returns HTTPResponse.Status +func (r DeleteScrollResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r DeleteScrollResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type GetScrollResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *RuntimeScroll +} + +// Status returns HTTPResponse.Status +func (r GetScrollResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetScrollResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type BackupScrollResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *RuntimeScroll +} + +// Status returns HTTPResponse.Status +func (r BackupScrollResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r BackupScrollResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type 
RunScrollCommandResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *RuntimeScroll +} + +// Status returns HTTPResponse.Status +func (r RunScrollCommandResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r RunScrollCommandResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type GetScrollConfigResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *map[string]interface{} +} + +// Status returns HTTPResponse.Status +func (r GetScrollConfigResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetScrollConfigResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type GetScrollConsolesResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *map[string]interface{} +} + +// Status returns HTTPResponse.Status +func (r GetScrollConsolesResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetScrollConsolesResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type GetScrollLogsResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *ScrollLogMap +} + +// Status returns HTTPResponse.Status +func (r GetScrollLogsResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetScrollLogsResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type GetScrollPortsResponse struct { + Body 
[]byte + HTTPResponse *http.Response + JSON200 *[]RuntimePortStatus +} + +// Status returns HTTPResponse.Status +func (r GetScrollPortsResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetScrollPortsResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type GetScrollProceduresResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *CommandStatusMap +} + +// Status returns HTTPResponse.Status +func (r GetScrollProceduresResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetScrollProceduresResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type GetScrollQueueResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *CommandStatusMap +} + +// Status returns HTTPResponse.Status +func (r GetScrollQueueResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetScrollQueueResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type RestoreScrollResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *RuntimeScroll +} + +// Status returns HTTPResponse.Status +func (r RestoreScrollResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r RestoreScrollResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type ApplyScrollRoutingResponse struct { + Body []byte + HTTPResponse *http.Response + 
JSON200 *RuntimeScroll +} + +// Status returns HTTPResponse.Status +func (r ApplyScrollRoutingResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r ApplyScrollRoutingResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type GetScrollRoutingTargetsResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *[]RuntimeRoutingTarget +} + +// Status returns HTTPResponse.Status +func (r GetScrollRoutingTargetsResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetScrollRoutingTargetsResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type StartScrollResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *RuntimeScroll +} + +// Status returns HTTPResponse.Status +func (r StartScrollResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r StartScrollResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type StopScrollResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *RuntimeScroll +} + +// Status returns HTTPResponse.Status +func (r StopScrollResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r StopScrollResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +// GetHealthAuthWithResponse request returning *GetHealthAuthResponse +func (c *ClientWithResponses) GetHealthAuthWithResponse(ctx 
context.Context, reqEditors ...RequestEditorFn) (*GetHealthAuthResponse, error) { + rsp, err := c.GetHealthAuth(ctx, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetHealthAuthResponse(rsp) +} + +// ListScrollsWithResponse request returning *ListScrollsResponse +func (c *ClientWithResponses) ListScrollsWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*ListScrollsResponse, error) { + rsp, err := c.ListScrolls(ctx, reqEditors...) + if err != nil { + return nil, err + } + return ParseListScrollsResponse(rsp) } // CreateScrollWithBodyWithResponse request with arbitrary body returning *CreateScrollResponse @@ -783,102 +1846,554 @@ func (c *ClientWithResponses) CreateScrollWithBodyWithResponse(ctx context.Conte if err != nil { return nil, err } - return ParseCreateScrollResponse(rsp) + return ParseCreateScrollResponse(rsp) +} + +func (c *ClientWithResponses) CreateScrollWithResponse(ctx context.Context, body CreateScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*CreateScrollResponse, error) { + rsp, err := c.CreateScroll(ctx, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseCreateScrollResponse(rsp) +} + +// EnsureScrollWithBodyWithResponse request with arbitrary body returning *EnsureScrollResponse +func (c *ClientWithResponses) EnsureScrollWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*EnsureScrollResponse, error) { + rsp, err := c.EnsureScrollWithBody(ctx, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseEnsureScrollResponse(rsp) +} + +func (c *ClientWithResponses) EnsureScrollWithResponse(ctx context.Context, body EnsureScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*EnsureScrollResponse, error) { + rsp, err := c.EnsureScroll(ctx, body, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseEnsureScrollResponse(rsp) +} + +// DeleteScrollWithResponse request returning *DeleteScrollResponse +func (c *ClientWithResponses) DeleteScrollWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*DeleteScrollResponse, error) { + rsp, err := c.DeleteScroll(ctx, id, reqEditors...) + if err != nil { + return nil, err + } + return ParseDeleteScrollResponse(rsp) +} + +// GetScrollWithResponse request returning *GetScrollResponse +func (c *ClientWithResponses) GetScrollWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollResponse, error) { + rsp, err := c.GetScroll(ctx, id, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetScrollResponse(rsp) +} + +// BackupScrollWithBodyWithResponse request with arbitrary body returning *BackupScrollResponse +func (c *ClientWithResponses) BackupScrollWithBodyWithResponse(ctx context.Context, id string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*BackupScrollResponse, error) { + rsp, err := c.BackupScrollWithBody(ctx, id, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseBackupScrollResponse(rsp) +} + +func (c *ClientWithResponses) BackupScrollWithResponse(ctx context.Context, id string, body BackupScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*BackupScrollResponse, error) { + rsp, err := c.BackupScroll(ctx, id, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseBackupScrollResponse(rsp) +} + +// RunScrollCommandWithResponse request returning *RunScrollCommandResponse +func (c *ClientWithResponses) RunScrollCommandWithResponse(ctx context.Context, id string, command string, reqEditors ...RequestEditorFn) (*RunScrollCommandResponse, error) { + rsp, err := c.RunScrollCommand(ctx, id, command, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseRunScrollCommandResponse(rsp) +} + +// GetScrollConfigWithResponse request returning *GetScrollConfigResponse +func (c *ClientWithResponses) GetScrollConfigWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollConfigResponse, error) { + rsp, err := c.GetScrollConfig(ctx, id, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetScrollConfigResponse(rsp) +} + +// GetScrollConsolesWithResponse request returning *GetScrollConsolesResponse +func (c *ClientWithResponses) GetScrollConsolesWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollConsolesResponse, error) { + rsp, err := c.GetScrollConsoles(ctx, id, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetScrollConsolesResponse(rsp) +} + +// GetScrollLogsWithResponse request returning *GetScrollLogsResponse +func (c *ClientWithResponses) GetScrollLogsWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollLogsResponse, error) { + rsp, err := c.GetScrollLogs(ctx, id, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetScrollLogsResponse(rsp) +} + +// GetScrollPortsWithResponse request returning *GetScrollPortsResponse +func (c *ClientWithResponses) GetScrollPortsWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollPortsResponse, error) { + rsp, err := c.GetScrollPorts(ctx, id, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetScrollPortsResponse(rsp) +} + +// GetScrollProceduresWithResponse request returning *GetScrollProceduresResponse +func (c *ClientWithResponses) GetScrollProceduresWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollProceduresResponse, error) { + rsp, err := c.GetScrollProcedures(ctx, id, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseGetScrollProceduresResponse(rsp) +} + +// GetScrollQueueWithResponse request returning *GetScrollQueueResponse +func (c *ClientWithResponses) GetScrollQueueWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollQueueResponse, error) { + rsp, err := c.GetScrollQueue(ctx, id, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetScrollQueueResponse(rsp) +} + +// RestoreScrollWithBodyWithResponse request with arbitrary body returning *RestoreScrollResponse +func (c *ClientWithResponses) RestoreScrollWithBodyWithResponse(ctx context.Context, id string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*RestoreScrollResponse, error) { + rsp, err := c.RestoreScrollWithBody(ctx, id, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseRestoreScrollResponse(rsp) +} + +func (c *ClientWithResponses) RestoreScrollWithResponse(ctx context.Context, id string, body RestoreScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*RestoreScrollResponse, error) { + rsp, err := c.RestoreScroll(ctx, id, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseRestoreScrollResponse(rsp) +} + +// ApplyScrollRoutingWithBodyWithResponse request with arbitrary body returning *ApplyScrollRoutingResponse +func (c *ClientWithResponses) ApplyScrollRoutingWithBodyWithResponse(ctx context.Context, id string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*ApplyScrollRoutingResponse, error) { + rsp, err := c.ApplyScrollRoutingWithBody(ctx, id, contentType, body, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseApplyScrollRoutingResponse(rsp) +} + +func (c *ClientWithResponses) ApplyScrollRoutingWithResponse(ctx context.Context, id string, body ApplyScrollRoutingJSONRequestBody, reqEditors ...RequestEditorFn) (*ApplyScrollRoutingResponse, error) { + rsp, err := c.ApplyScrollRouting(ctx, id, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseApplyScrollRoutingResponse(rsp) +} + +// GetScrollRoutingTargetsWithResponse request returning *GetScrollRoutingTargetsResponse +func (c *ClientWithResponses) GetScrollRoutingTargetsWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollRoutingTargetsResponse, error) { + rsp, err := c.GetScrollRoutingTargets(ctx, id, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetScrollRoutingTargetsResponse(rsp) } -func (c *ClientWithResponses) CreateScrollWithResponse(ctx context.Context, body CreateScrollJSONRequestBody, reqEditors ...RequestEditorFn) (*CreateScrollResponse, error) { - rsp, err := c.CreateScroll(ctx, body, reqEditors...) +// StartScrollWithResponse request returning *StartScrollResponse +func (c *ClientWithResponses) StartScrollWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*StartScrollResponse, error) { + rsp, err := c.StartScroll(ctx, id, reqEditors...) if err != nil { return nil, err } - return ParseCreateScrollResponse(rsp) + return ParseStartScrollResponse(rsp) +} + +// StopScrollWithResponse request returning *StopScrollResponse +func (c *ClientWithResponses) StopScrollWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*StopScrollResponse, error) { + rsp, err := c.StopScroll(ctx, id, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseStopScrollResponse(rsp) +} + +// ParseGetHealthAuthResponse parses an HTTP response from a GetHealthAuthWithResponse call +func ParseGetHealthAuthResponse(rsp *http.Response) (*GetHealthAuthResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &GetHealthAuthResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest HealthResponse + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 503: + var dest HealthResponse + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON503 = &dest + + } + + return response, nil +} + +// ParseListScrollsResponse parses an HTTP response from a ListScrollsWithResponse call +func ParseListScrollsResponse(rsp *http.Response) (*ListScrollsResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &ListScrollsResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest []RuntimeScroll + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + } + + return response, nil +} + +// ParseCreateScrollResponse parses an HTTP response from a CreateScrollWithResponse call +func ParseCreateScrollResponse(rsp *http.Response) (*CreateScrollResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &CreateScrollResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + 
} + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 201: + var dest RuntimeScroll + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON201 = &dest + + } + + return response, nil +} + +// ParseEnsureScrollResponse parses an HTTP response from a EnsureScrollWithResponse call +func ParseEnsureScrollResponse(rsp *http.Response) (*EnsureScrollResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &EnsureScrollResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest RuntimeScroll + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + } + + return response, nil +} + +// ParseDeleteScrollResponse parses an HTTP response from a DeleteScrollWithResponse call +func ParseDeleteScrollResponse(rsp *http.Response) (*DeleteScrollResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &DeleteScrollResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest DeletedScroll + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + } + + return response, nil +} + +// ParseGetScrollResponse parses an HTTP response from a GetScrollWithResponse call +func ParseGetScrollResponse(rsp *http.Response) (*GetScrollResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &GetScrollResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case 
strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest RuntimeScroll + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + } + + return response, nil +} + +// ParseBackupScrollResponse parses an HTTP response from a BackupScrollWithResponse call +func ParseBackupScrollResponse(rsp *http.Response) (*BackupScrollResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &BackupScrollResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest RuntimeScroll + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + } + + return response, nil +} + +// ParseRunScrollCommandResponse parses an HTTP response from a RunScrollCommandWithResponse call +func ParseRunScrollCommandResponse(rsp *http.Response) (*RunScrollCommandResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &RunScrollCommandResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest RuntimeScroll + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + } + + return response, nil +} + +// ParseGetScrollConfigResponse parses an HTTP response from a GetScrollConfigWithResponse call +func ParseGetScrollConfigResponse(rsp *http.Response) (*GetScrollConfigResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &GetScrollConfigResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case 
strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest map[string]interface{} + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + } + + return response, nil } -// DeleteScrollWithResponse request returning *DeleteScrollResponse -func (c *ClientWithResponses) DeleteScrollWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*DeleteScrollResponse, error) { - rsp, err := c.DeleteScroll(ctx, id, reqEditors...) - if err != nil { - return nil, err +// ParseGetScrollConsolesResponse parses an HTTP response from a GetScrollConsolesWithResponse call +func ParseGetScrollConsolesResponse(rsp *http.Response) (*GetScrollConsolesResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &GetScrollConsolesResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest map[string]interface{} + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + } - return ParseDeleteScrollResponse(rsp) + + return response, nil } -// GetScrollWithResponse request returning *GetScrollResponse -func (c *ClientWithResponses) GetScrollWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollResponse, error) { - rsp, err := c.GetScroll(ctx, id, reqEditors...) 
+// ParseGetScrollLogsResponse parses an HTTP response from a GetScrollLogsWithResponse call +func ParseGetScrollLogsResponse(rsp *http.Response) (*GetScrollLogsResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() if err != nil { return nil, err } - return ParseGetScrollResponse(rsp) -} -// RunScrollCommandWithResponse request returning *RunScrollCommandResponse -func (c *ClientWithResponses) RunScrollCommandWithResponse(ctx context.Context, id string, command string, reqEditors ...RequestEditorFn) (*RunScrollCommandResponse, error) { - rsp, err := c.RunScrollCommand(ctx, id, command, reqEditors...) - if err != nil { - return nil, err + response := &GetScrollLogsResponse{ + Body: bodyBytes, + HTTPResponse: rsp, } - return ParseRunScrollCommandResponse(rsp) -} -// GetScrollPortsWithResponse request returning *GetScrollPortsResponse -func (c *ClientWithResponses) GetScrollPortsWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetScrollPortsResponse, error) { - rsp, err := c.GetScrollPorts(ctx, id, reqEditors...) 
- if err != nil { - return nil, err + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest ScrollLogMap + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + } - return ParseGetScrollPortsResponse(rsp) + + return response, nil } -// ParseGetHealthAuthResponse parses an HTTP response from a GetHealthAuthWithResponse call -func ParseGetHealthAuthResponse(rsp *http.Response) (*GetHealthAuthResponse, error) { +// ParseGetScrollPortsResponse parses an HTTP response from a GetScrollPortsWithResponse call +func ParseGetScrollPortsResponse(rsp *http.Response) (*GetScrollPortsResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) defer func() { _ = rsp.Body.Close() }() if err != nil { return nil, err } - response := &GetHealthAuthResponse{ + response := &GetScrollPortsResponse{ Body: bodyBytes, HTTPResponse: rsp, } switch { case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: - var dest HealthResponse + var dest []RuntimePortStatus if err := json.Unmarshal(bodyBytes, &dest); err != nil { return nil, err } response.JSON200 = &dest - case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 503: - var dest HealthResponse + } + + return response, nil +} + +// ParseGetScrollProceduresResponse parses an HTTP response from a GetScrollProceduresWithResponse call +func ParseGetScrollProceduresResponse(rsp *http.Response) (*GetScrollProceduresResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &GetScrollProceduresResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest CommandStatusMap if err := json.Unmarshal(bodyBytes, &dest); err != nil { return nil, err } - response.JSON503 = &dest + 
response.JSON200 = &dest } return response, nil } -// ParseListScrollsResponse parses an HTTP response from a ListScrollsWithResponse call -func ParseListScrollsResponse(rsp *http.Response) (*ListScrollsResponse, error) { +// ParseGetScrollQueueResponse parses an HTTP response from a GetScrollQueueWithResponse call +func ParseGetScrollQueueResponse(rsp *http.Response) (*GetScrollQueueResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) defer func() { _ = rsp.Body.Close() }() if err != nil { return nil, err } - response := &ListScrollsResponse{ + response := &GetScrollQueueResponse{ Body: bodyBytes, HTTPResponse: rsp, } switch { case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: - var dest []RuntimeScroll + var dest CommandStatusMap if err := json.Unmarshal(bodyBytes, &dest); err != nil { return nil, err } @@ -889,48 +2404,48 @@ func ParseListScrollsResponse(rsp *http.Response) (*ListScrollsResponse, error) return response, nil } -// ParseCreateScrollResponse parses an HTTP response from a CreateScrollWithResponse call -func ParseCreateScrollResponse(rsp *http.Response) (*CreateScrollResponse, error) { +// ParseRestoreScrollResponse parses an HTTP response from a RestoreScrollWithResponse call +func ParseRestoreScrollResponse(rsp *http.Response) (*RestoreScrollResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) defer func() { _ = rsp.Body.Close() }() if err != nil { return nil, err } - response := &CreateScrollResponse{ + response := &RestoreScrollResponse{ Body: bodyBytes, HTTPResponse: rsp, } switch { - case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 201: + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: var dest RuntimeScroll if err := json.Unmarshal(bodyBytes, &dest); err != nil { return nil, err } - response.JSON201 = &dest + response.JSON200 = &dest } return response, nil } -// ParseDeleteScrollResponse parses an HTTP response from a 
DeleteScrollWithResponse call -func ParseDeleteScrollResponse(rsp *http.Response) (*DeleteScrollResponse, error) { +// ParseApplyScrollRoutingResponse parses an HTTP response from a ApplyScrollRoutingWithResponse call +func ParseApplyScrollRoutingResponse(rsp *http.Response) (*ApplyScrollRoutingResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) defer func() { _ = rsp.Body.Close() }() if err != nil { return nil, err } - response := &DeleteScrollResponse{ + response := &ApplyScrollRoutingResponse{ Body: bodyBytes, HTTPResponse: rsp, } switch { case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: - var dest DeletedScroll + var dest RuntimeScroll if err := json.Unmarshal(bodyBytes, &dest); err != nil { return nil, err } @@ -941,22 +2456,22 @@ func ParseDeleteScrollResponse(rsp *http.Response) (*DeleteScrollResponse, error return response, nil } -// ParseGetScrollResponse parses an HTTP response from a GetScrollWithResponse call -func ParseGetScrollResponse(rsp *http.Response) (*GetScrollResponse, error) { +// ParseGetScrollRoutingTargetsResponse parses an HTTP response from a GetScrollRoutingTargetsWithResponse call +func ParseGetScrollRoutingTargetsResponse(rsp *http.Response) (*GetScrollRoutingTargetsResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) defer func() { _ = rsp.Body.Close() }() if err != nil { return nil, err } - response := &GetScrollResponse{ + response := &GetScrollRoutingTargetsResponse{ Body: bodyBytes, HTTPResponse: rsp, } switch { case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: - var dest RuntimeScroll + var dest []RuntimeRoutingTarget if err := json.Unmarshal(bodyBytes, &dest); err != nil { return nil, err } @@ -967,15 +2482,15 @@ func ParseGetScrollResponse(rsp *http.Response) (*GetScrollResponse, error) { return response, nil } -// ParseRunScrollCommandResponse parses an HTTP response from a RunScrollCommandWithResponse call -func 
ParseRunScrollCommandResponse(rsp *http.Response) (*RunScrollCommandResponse, error) { +// ParseStartScrollResponse parses an HTTP response from a StartScrollWithResponse call +func ParseStartScrollResponse(rsp *http.Response) (*StartScrollResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) defer func() { _ = rsp.Body.Close() }() if err != nil { return nil, err } - response := &RunScrollCommandResponse{ + response := &StartScrollResponse{ Body: bodyBytes, HTTPResponse: rsp, } @@ -993,22 +2508,22 @@ func ParseRunScrollCommandResponse(rsp *http.Response) (*RunScrollCommandRespons return response, nil } -// ParseGetScrollPortsResponse parses an HTTP response from a GetScrollPortsWithResponse call -func ParseGetScrollPortsResponse(rsp *http.Response) (*GetScrollPortsResponse, error) { +// ParseStopScrollResponse parses an HTTP response from a StopScrollWithResponse call +func ParseStopScrollResponse(rsp *http.Response) (*StopScrollResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) defer func() { _ = rsp.Body.Close() }() if err != nil { return nil, err } - response := &GetScrollPortsResponse{ + response := &StopScrollResponse{ Body: bodyBytes, HTTPResponse: rsp, } switch { case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: - var dest []RuntimePortStatus + var dest RuntimeScroll if err := json.Unmarshal(bodyBytes, &dest); err != nil { return nil, err } @@ -1030,18 +2545,54 @@ type ServerInterface interface { // Create runtime scroll // (POST /api/v1/scrolls) CreateScroll(c *fiber.Ctx) error + // Ensure runtime scroll exists and optionally starts + // (POST /api/v1/scrolls/ensure) + EnsureScroll(c *fiber.Ctx) error // Delete runtime scroll // (DELETE /api/v1/scrolls/{id}) DeleteScroll(c *fiber.Ctx, id string) error // Get runtime scroll // (GET /api/v1/scrolls/{id}) GetScroll(c *fiber.Ctx, id string) error + // Execute runtime backup + // (POST /api/v1/scrolls/{id}/backup) + BackupScroll(c *fiber.Ctx, id string) error // 
Run runtime scroll command // (POST /api/v1/scrolls/{id}/commands/{command}) RunScrollCommand(c *fiber.Ctx, id string, command string) error + // Get parsed scroll config + // (GET /api/v1/scrolls/{id}/config) + GetScrollConfig(c *fiber.Ctx, id string) error + // Get scroll-scoped consoles + // (GET /api/v1/scrolls/{id}/consoles) + GetScrollConsoles(c *fiber.Ctx, id string) error + // Get scroll-scoped logs + // (GET /api/v1/scrolls/{id}/logs) + GetScrollLogs(c *fiber.Ctx, id string) error // Get runtime scroll port status // (GET /api/v1/scrolls/{id}/ports) GetScrollPorts(c *fiber.Ctx, id string) error + // Get procedure state + // (GET /api/v1/scrolls/{id}/procedures) + GetScrollProcedures(c *fiber.Ctx, id string) error + // Get runtime queue state + // (GET /api/v1/scrolls/{id}/queue) + GetScrollQueue(c *fiber.Ctx, id string) error + // Execute runtime restore + // (POST /api/v1/scrolls/{id}/restore) + RestoreScroll(c *fiber.Ctx, id string) error + // Persist operator-assigned public routing + // (POST /api/v1/scrolls/{id}/routing) + ApplyScrollRouting(c *fiber.Ctx, id string) error + // Get stable backend routing targets + // (GET /api/v1/scrolls/{id}/routing/targets) + GetScrollRoutingTargets(c *fiber.Ctx, id string) error + // Start runtime scroll + // (POST /api/v1/scrolls/{id}/start) + StartScroll(c *fiber.Ctx, id string) error + // Stop runtime scroll workloads while preserving data + // (POST /api/v1/scrolls/{id}/stop) + StopScroll(c *fiber.Ctx, id string) error } // ServerInterfaceWrapper converts contexts to parameters. 
@@ -1069,6 +2620,12 @@ func (siw *ServerInterfaceWrapper) CreateScroll(c *fiber.Ctx) error { return siw.Handler.CreateScroll(c) } +// EnsureScroll operation middleware +func (siw *ServerInterfaceWrapper) EnsureScroll(c *fiber.Ctx) error { + + return siw.Handler.EnsureScroll(c) +} + // DeleteScroll operation middleware func (siw *ServerInterfaceWrapper) DeleteScroll(c *fiber.Ctx) error { @@ -1101,6 +2658,22 @@ func (siw *ServerInterfaceWrapper) GetScroll(c *fiber.Ctx) error { return siw.Handler.GetScroll(c, id) } +// BackupScroll operation middleware +func (siw *ServerInterfaceWrapper) BackupScroll(c *fiber.Ctx) error { + + var err error + + // ------------- Path parameter "id" ------------- + var id string + + err = runtime.BindStyledParameterWithOptions("simple", "id", c.Params("id"), &id, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter id: %w", err).Error()) + } + + return siw.Handler.BackupScroll(c, id) +} + // RunScrollCommand operation middleware func (siw *ServerInterfaceWrapper) RunScrollCommand(c *fiber.Ctx) error { @@ -1125,6 +2698,54 @@ func (siw *ServerInterfaceWrapper) RunScrollCommand(c *fiber.Ctx) error { return siw.Handler.RunScrollCommand(c, id, command) } +// GetScrollConfig operation middleware +func (siw *ServerInterfaceWrapper) GetScrollConfig(c *fiber.Ctx) error { + + var err error + + // ------------- Path parameter "id" ------------- + var id string + + err = runtime.BindStyledParameterWithOptions("simple", "id", c.Params("id"), &id, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter id: %w", err).Error()) + } + + return siw.Handler.GetScrollConfig(c, id) +} + +// GetScrollConsoles operation middleware +func (siw *ServerInterfaceWrapper) GetScrollConsoles(c *fiber.Ctx) error { + + var err error + 
+ // ------------- Path parameter "id" ------------- + var id string + + err = runtime.BindStyledParameterWithOptions("simple", "id", c.Params("id"), &id, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter id: %w", err).Error()) + } + + return siw.Handler.GetScrollConsoles(c, id) +} + +// GetScrollLogs operation middleware +func (siw *ServerInterfaceWrapper) GetScrollLogs(c *fiber.Ctx) error { + + var err error + + // ------------- Path parameter "id" ------------- + var id string + + err = runtime.BindStyledParameterWithOptions("simple", "id", c.Params("id"), &id, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter id: %w", err).Error()) + } + + return siw.Handler.GetScrollLogs(c, id) +} + // GetScrollPorts operation middleware func (siw *ServerInterfaceWrapper) GetScrollPorts(c *fiber.Ctx) error { @@ -1141,6 +2762,118 @@ func (siw *ServerInterfaceWrapper) GetScrollPorts(c *fiber.Ctx) error { return siw.Handler.GetScrollPorts(c, id) } +// GetScrollProcedures operation middleware +func (siw *ServerInterfaceWrapper) GetScrollProcedures(c *fiber.Ctx) error { + + var err error + + // ------------- Path parameter "id" ------------- + var id string + + err = runtime.BindStyledParameterWithOptions("simple", "id", c.Params("id"), &id, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter id: %w", err).Error()) + } + + return siw.Handler.GetScrollProcedures(c, id) +} + +// GetScrollQueue operation middleware +func (siw *ServerInterfaceWrapper) GetScrollQueue(c *fiber.Ctx) error { + + var err error + + // ------------- Path parameter "id" ------------- + var id string + + err = 
runtime.BindStyledParameterWithOptions("simple", "id", c.Params("id"), &id, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter id: %w", err).Error()) + } + + return siw.Handler.GetScrollQueue(c, id) +} + +// RestoreScroll operation middleware +func (siw *ServerInterfaceWrapper) RestoreScroll(c *fiber.Ctx) error { + + var err error + + // ------------- Path parameter "id" ------------- + var id string + + err = runtime.BindStyledParameterWithOptions("simple", "id", c.Params("id"), &id, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter id: %w", err).Error()) + } + + return siw.Handler.RestoreScroll(c, id) +} + +// ApplyScrollRouting operation middleware +func (siw *ServerInterfaceWrapper) ApplyScrollRouting(c *fiber.Ctx) error { + + var err error + + // ------------- Path parameter "id" ------------- + var id string + + err = runtime.BindStyledParameterWithOptions("simple", "id", c.Params("id"), &id, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter id: %w", err).Error()) + } + + return siw.Handler.ApplyScrollRouting(c, id) +} + +// GetScrollRoutingTargets operation middleware +func (siw *ServerInterfaceWrapper) GetScrollRoutingTargets(c *fiber.Ctx) error { + + var err error + + // ------------- Path parameter "id" ------------- + var id string + + err = runtime.BindStyledParameterWithOptions("simple", "id", c.Params("id"), &id, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter id: %w", err).Error()) + } + + return siw.Handler.GetScrollRoutingTargets(c, id) +} + +// StartScroll 
operation middleware +func (siw *ServerInterfaceWrapper) StartScroll(c *fiber.Ctx) error { + + var err error + + // ------------- Path parameter "id" ------------- + var id string + + err = runtime.BindStyledParameterWithOptions("simple", "id", c.Params("id"), &id, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter id: %w", err).Error()) + } + + return siw.Handler.StartScroll(c, id) +} + +// StopScroll operation middleware +func (siw *ServerInterfaceWrapper) StopScroll(c *fiber.Ctx) error { + + var err error + + // ------------- Path parameter "id" ------------- + var id string + + err = runtime.BindStyledParameterWithOptions("simple", "id", c.Params("id"), &id, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter id: %w", err).Error()) + } + + return siw.Handler.StopScroll(c, id) +} + // FiberServerOptions provides options for the Fiber server. 
type FiberServerOptions struct { BaseURL string @@ -1168,47 +2901,83 @@ func RegisterHandlersWithOptions(router fiber.Router, si ServerInterface, option router.Post(options.BaseURL+"/api/v1/scrolls", wrapper.CreateScroll) + router.Post(options.BaseURL+"/api/v1/scrolls/ensure", wrapper.EnsureScroll) + router.Delete(options.BaseURL+"/api/v1/scrolls/:id", wrapper.DeleteScroll) router.Get(options.BaseURL+"/api/v1/scrolls/:id", wrapper.GetScroll) + router.Post(options.BaseURL+"/api/v1/scrolls/:id/backup", wrapper.BackupScroll) + router.Post(options.BaseURL+"/api/v1/scrolls/:id/commands/:command", wrapper.RunScrollCommand) + router.Get(options.BaseURL+"/api/v1/scrolls/:id/config", wrapper.GetScrollConfig) + + router.Get(options.BaseURL+"/api/v1/scrolls/:id/consoles", wrapper.GetScrollConsoles) + + router.Get(options.BaseURL+"/api/v1/scrolls/:id/logs", wrapper.GetScrollLogs) + router.Get(options.BaseURL+"/api/v1/scrolls/:id/ports", wrapper.GetScrollPorts) + router.Get(options.BaseURL+"/api/v1/scrolls/:id/procedures", wrapper.GetScrollProcedures) + + router.Get(options.BaseURL+"/api/v1/scrolls/:id/queue", wrapper.GetScrollQueue) + + router.Post(options.BaseURL+"/api/v1/scrolls/:id/restore", wrapper.RestoreScroll) + + router.Post(options.BaseURL+"/api/v1/scrolls/:id/routing", wrapper.ApplyScrollRouting) + + router.Get(options.BaseURL+"/api/v1/scrolls/:id/routing/targets", wrapper.GetScrollRoutingTargets) + + router.Post(options.BaseURL+"/api/v1/scrolls/:id/start", wrapper.StartScroll) + + router.Post(options.BaseURL+"/api/v1/scrolls/:id/stop", wrapper.StopScroll) + } // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/9RYX2/bOBL/KgTvHhXbufbuwW+5BO2l18MFSRd5aIOAJsc2a4pUhyMn3sDffUFSkiVL", - "TtbbdLF9MWRx/v5mOH/0xKXLC2fBkufTJ+7lEnIRH88RBMGNRGfMNXwrwVN4XaArAElDJBJIei5kPFHg", - "JeqCtLN8yv9/fsnqU4YwBwQrgTlkxklhmI+CWSFoyTMOjyIvDPBpI9GPFJZajRaLMYGn+DMNPzzjtCkC", - 
"qSfUdsG3GVeCxD06N2RHfBCGKQG5sydJe1AbbJkJuQKrgoFMOktCW20XDEtLOgcW5DKlESQ53IzY5Zy5", - "XBOByphguSBALYz+tc1Ti8zFhhWlMYyW0CAxGrJeq77ZF1AgSEGgmDBaeDZ3yKzIYcQaj5Irtd4KUK3G", - "kawD6lc380OaA+UzkD2roANG8DEBzBSgXoNnmtgcXV6xjTYiN+z3W5a4XjOmbTuEVbU7vgDJ5tqA/zHh", - "3WYc4VupERSfft5dmLuG0s2+gqTg9AUYIFDpzvUvW8qTPlQkqIwEO1hVkvSiOTqQVAKGLPoPCEPLa/CF", - "sx76JuVODSTQeYkIltgycrMkn0XaduzdaijyBboFgvd9sVfVCSsAJVgSC4jXwjihQoiCYSLQhoyaO8wF", - "8SmfGydC0cjFo87LnE9PJ5OM59qmf5PGBFvmM8AKUaR7JWjAt9sl2Ha6R9qIdKMxMJ6EZOEZt6UxYhbc", - "JSzhpXBEiIbicJ2y78oh3TTh7oZi5krbTpCZcwaEDdxL5+leF4PZE88Kh9Q61ZZgkaBYARRnRq/hE4r5", - "XMtBGUZ4uheS9FrT5l5EUYNoHCxA/Sw4aFGBToIq8QAfOnLSmcFDfLyfbSjB1dinLf3r7c62libvSpTD", - "aqiHRgvu6vAoXTWPWz0v80Fb5R6GbTrGu73Ei3FoY1tFoAVoVmXYzvkGoWcy9lAtaw8OPU+ky3NhVSJU", - "Sqdqf9US0LlMO6UyzizqqBTszA4HenPvtXuwgPeHCnLqXQdze6+3PVfQbShRn2u/eMaxtKGbxartiiK+", - "A0SHPGuK/t2Al2WhjkRmqFM0Ues60Qax637jTCc2HXP6yRMgAFmips1NmEer+gYCAc9KWu7+vav9+HD7", - "KQLQrtQfbj8xciuwaQ7RCixp2rAC3VorwGhqEB9uWhS3A2FJVMQ7FfhrnV3xN0uHdBJKo2LfSsBNrcwh", - "u4XZjZMroDCAWJB1U9KBMRLzuvYlFTvNotD/hQ3fBhS0nbugOE4x8a5s9528CEMyO/94yYworVyCj8NN", - "LqxYgK/nH8CTOLc0U48oCqNlapYZM3oFX+xChCEPcA3oszj6zoQHn0WBDzCrz0ZformaYhNvDOAZD6fJ", - "rMnodDSJF6UAKwrNp/xNfJXxMKbFgI5Focfr03GaEsKbBQwMe++B6l7bmSd4FJ4a/qVKhGlcifEK2Zum", - "lqjsH5NJjSTYqKUFwfirD6rq9Sc8/R1hzqf8b+PdfjSulqPx3lAUQ9W1OVFsAgD/nLz5ExXfAK61BFZa", - "sRY6TR7xPpV5LnBTwbmPI4mFD3e8ikS4zgFvfhdY6zClzPGtOHXh/6g93VQ03wm+Jsj9S2B0G8x2d4EQ", - "xWYIm+vOHuP3cAnm7606bWiqkzY2oUX6ASDaizNPRRQ8/dupzaslwtBuvu1W7NAht704nL6aCXvwvwQ3", - "q1tYF/XkyB7uz8PeT8nxk1bbVDhCA+xHJO1VTUQKgSIHAgwqnlJRrr5DVDU5NrsullkLl/1OefcDi013", - "J3wZ53oI2Gb87eRtv5zukVtHbB7num5gktqjApMN14X3QD8n8kdm+PciHgrzK9yDcT08j5+qp3g1hkvV", - "dWmTe+eJ9EeEKBsUIhuFP0mwf0kT636IvjPo16Xd/762Q+YPBD8sbYdbdHMVryLZX/A+HtP5Wx9Dju7+", - "LABVD0CvfnE70p+PY9x14lgdI3BouD+7CvN1iYZP+ZgHmCuhva0kGZDm/xwsxem9yikGjyDLSNkEuMnj", - "fUkf3YJ5QhC5tosoBYFQw1qYHbdxCz/AW5WTsBeV0DJmxxhPBjgHN6eoPe5JfifhAWY+Ug5ICanBwvYU", - 
"1kPtbPrgW8ejEhA/cPR505DN5BLkyg8yVmNyn/V/pSF9UuVAnRJD3teZ0BdxkTYdo+cgN9IMs1fp0+d+", - "pw2wB0FyWcdMwRqMK2ImVB9ga/wC2YCMM2sdJdTmQZyQEnzLe9Gce7692/4WAAD//++SYFRCGgAA", + "H4sIAAAAAAAC/+xa3XMbuQ3/VzhsH2XL6V374Ddf0rvmmptzrXTycJfxUFxIYsQlaZIrW/Xof+/wa7Uf", + "XH3ZvsaZvmRiEQCBHwASwPIRU1kqKUBYgy8fsaELKIn/75VSfH0jK8vE/AbuKjDW/ay0VKAtA09EjGFz", + "USZ2ZqH0//mzhhm+xH8ab8WPo+zxTSUsK8GJhquaH29G2K4V4EtMtCZrvNmMsIa7imko8OVvra0+17Ry", + "+gWoZ34ry5KIYmKJrcwvRHn1ioJZJgXh1w21ra4gJ0ADsTChWnI+bLC2bEaoXynAUM2U2wBf4l/fvkdp", + "FWmYgQZBAUmNuKSEI+MFI0XsAo8wPJBS8WBt4DHnha5YcT6fjy0Y6/+5dP/gWldjNRNzp2tBLLnVUub0", + "UMFiVBAopTgLu7ttnS5TQpcgCqcgolJYwgQTc6SDT5CTiwqmgVqp1+fo/QzJklkLxQgRVBILmhHO/tPk", + "SSJLskaq4hzZBdRInOe0Z0Vf7XegNFBioUCEM2LQTGokSAnnqLYomJL2jYCyYuzJWqB+kVOT29lR7oBs", + "5wYtMJyNAWBUgGYrMIhZNNOyjGzna1JydLhmges5fdrUg4gimWMUUDRjHMxLuddYoqMRM1Jx28m4qZQc", + "iOgneEqtXHa/Aw4WipCd/bQMEZXTxFaeYOuAIkjqK95RhzmSKCCn0d+FqfQxx8XuJB5IksEI3hdAf4hX", + "/gGE28UNGCWFgb75pSwy6fa20hqERQvPjQLGyNM2M0Uuc8GltJxrMKYv9jquIAWagrBkDv4Q4ZIULqCd", + "YsTRuvybSV0Siy/xjEvijtiSPLCyKvHlm4uLES6ZCH9d1CqIqpyCrpG8LYjN2PZpAaJ5OHhaH231jo7x", + "zKUWHmFRcU6mztyWMwZC0kOU80O8Uq+ir35Nlp4Wlxr6sTIj3DwxWKKS11LbSZ2XbbWmshLNsK/3GeGF", + "NPaWqazCfk1J3TSHCQvz4K8lgLribAUfNZnNGM3K4MTYW0ItWzG7viVeVNZlh2fksEZKSwpFpQf4tLSS", + "Sp53zsPtdG0DXLV+TNi/fb/VrbGTkZWm+W1sD40G3HHxqL0Sj1zulnnPRCHv8zodY10n+LwfmthGDzQA", + "HcUI2xpfI7QjYruVai9s4cGCFoTvis/jDnOn+O3w6q4AUdWUM7ojHSqdY9zstp+J+Uei55CxflBNt2AU", + "oUdnxz7jT80dA9wXtsOtQS4ku6gY0CtGYVjDRDBk4WFxe5t+78VwS4HOdjvCeKh22nkX0NBVmaO7Keq7", + "qeKok/Skgsgf3KB1cGtvWd4L0LcDvDpE9rO3rnU19pRqLVWuwtUhvyVA8QjrSrgC35enUin/WwBgVFe3", + "nzPwVqo40iW5krgOl7YRTe+1za+NaQVFS51c1IZw/SDnO3v5huOGMrf2SWcLn6m00syuJ86/sQQBokFf", + "VXax/evHBNXPnz56jJsV38+fPiIrlyBC98cKEJbZNVJarlgB2qPhxLvL0Ivb4rywVnnNHH/asy1+spDa", + "nrnqpUB3Feh12kxq9AmmE0mXYF3bJ4Cm4pY5Rk+M0x0TttjuTBT7J6zxxqHAxEy6jX3v6M+BTdfId7pi", + "BXr74T3ipBJ0Aca3lCURZA4mdZ2gz3y3WPeaRCnOaCi6R4izJfwu5sS11qBXoM3IDxymxIAZeYH3ME1r", + 
"5797dZn1zUCtAB5htxrUujh/c37hs1yBIIrhS/yd/2mEXXPsHTomio1Xb8ah23C/xFusbeFPYFPN3upL", + "sBceyun3RSAMbY/3l6+WfffjN/vLxUVCMlYKDQjGX4zbKk3Z9h02nebKu6qtc6Dwsf3Xi+/+wI0n4cpB", + "lSArwkIH4/OpKkui1xHOLo6WzI07RqIn3Inh8MafHWtyU4gc0/BTG/4PzNhJpHki+Mcc+fHy7A8pe9jc", + "tKZHpoOLU78zYGpCE1ea2LgKwGSAaI4rcTinwdgfZLF+tkDITUQ37UvB3f6bnh/ePJsKHfj3wY3SLdlG", + "PRjSwX037P2QHIOf+PgCKuuR5kTohTySGzod5JGL/5lHAmpdjwRDuqNWeGDGhqtFxpEnX4cZijnaXY+s", + "2IRz3pVEfXeFkWLtLkU0KcGCdls8hjs0DuvjFerLnzbQowZo3drp8ws6oT0O3e+EVBZuRvj7i+/7t1+H", + "XEiLZr5TbnstbHtUHo3yx/hPYF8n8keG/1MRd/foE48tlwdjV5dVavjs+sGvv7BLnv883Df4/NrOxgAz", + "cqwxIdun4gPQqpFg0WsneTx17uPH+L/NsPdvKhF0jh9RXyICRlkhtN7wlaT3v0PX2k3KJ6b5TSW6d+EW", + "mZOcL2ZsPlhE16fv20D3FZ7B3Wa954hros2204wG9w9PlSM7FVMjOZiDUA2UXyGuRwzx+pgnw9AS1lCg", + "qZ9yxKllH/qA3pmhUkGB6BaUE8Dncn4A8B8c1SsrKFpzrgzmzqZT8OYBi4R1/DNBPoy0ktoeAPW1DDX5", + "V4f1MV1941vk0Z09ckCl4cazV3kt6SdlTB0phzhzS/vKsqf36it3UyTrPJ65zFEdipMAv6uggv1Y/8uT", + "fYMwe8MGIU7hfdeg2sJ8F0HZfzxpMFbumsLcBIL/tzIv3ecGnA/uZZLjTsqtxoe6vNf9a9U4F4u0r8f1", + "uae2X5u7h/qels+vQRtmbHxqJfVZeLQLBQqvEpCufdMPgvD9el8IjK1/hnDApdZ6tvDqS5X2I4xDqpXA", + "gBJemXrRkinfPvDUHYYTfFS/G8sn6cQtf6Njv0l46Lc7PzzRs8zzjJVqF9BSfbM4+ycP+3CWqltQ30u9", + "5JIUBt0vGAekNPiXM2LuP0LvcUN81pNAHPo6fnX9HscXVniMHVJRaO+zflAqfEAvQVj/0SPOfBD4u9NR", + "1j6q50yP/f4QGauBlM4U4t+BW81gRfiW23d/fd5Y1sWibKvMljEUZn3O7NMDv7t/aGC2Eu5hajxlRorr", + "vxAT4SkKkyK8U09NTxTgz5w+b/hKjegC6NJkGeN35j7rLxW37CzGRQqTnPUpEvoi3oWnApzNgK4pz7PH", + "8Olz/+gC8J5Yukg+K2AFXCofCfEldMLPkWVkXAkhbUBt5sQRSsE0rCf1usGbz5v/BgAA//94laGN6jMA", + "AA==", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/internal/core/domain/runtime_scroll.go b/internal/core/domain/runtime_scroll.go index b2154143..377cf656 100644 --- a/internal/core/domain/runtime_scroll.go +++ b/internal/core/domain/runtime_scroll.go @@ -13,19 +13,43 @@ const ( ) type RuntimeScroll struct { - ID string `json:"id"` - OwnerID string `json:"owner_id,omitempty"` - Artifact string 
`json:"artifact"` - ScrollRoot string `json:"scroll_root"` - DataRoot string `json:"data_root"` - ScrollName string `json:"scroll_name"` - ScrollYAML string `json:"-"` - Status RuntimeScrollStatus `json:"status"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - Commands map[string]LockStatus `json:"commands,omitempty"` + ID string `json:"id"` + OwnerID string `json:"owner_id,omitempty"` + Artifact string `json:"artifact"` + ScrollRoot string `json:"scroll_root"` + DataRoot string `json:"data_root"` + ScrollName string `json:"scroll_name"` + ScrollYAML string `json:"-"` + Status RuntimeScrollStatus `json:"status"` + LastError string `json:"last_error,omitempty"` + Routing []RuntimeRouteAssignment `json:"routing,omitempty"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Commands map[string]LockStatus `json:"commands,omitempty"` } type RuntimeState struct { Scrolls map[string]*RuntimeScroll `json:"scrolls"` } + +type RuntimeRoutingTarget struct { + Name string `json:"name"` + Procedure string `json:"procedure"` + PortName string `json:"port_name"` + Port int `json:"port"` + Protocol string `json:"protocol"` + Namespace string `json:"namespace,omitempty"` + ServiceName string `json:"service_name"` + ServicePort int `json:"service_port"` + Selector map[string]string `json:"selector,omitempty"` +} + +type RuntimeRouteAssignment struct { + Name string `json:"name"` + PortName string `json:"port_name,omitempty"` + Host string `json:"host,omitempty"` + ExternalIP string `json:"external_ip,omitempty"` + PublicPort int `json:"public_port,omitempty"` + URL string `json:"url,omitempty"` + Protocol string `json:"protocol,omitempty"` +} diff --git a/internal/core/domain/scroll.go b/internal/core/domain/scroll.go index 5c56a0f6..c76e8ff3 100644 --- a/internal/core/domain/scroll.go +++ b/internal/core/domain/scroll.go @@ -119,6 +119,14 @@ func (p *Procedure) IsSignal() bool { return p.Kind() == 
ProcedureTypeSignal } +func ProcedureName(commandName string, idx int, procedure *Procedure) string { + name := fmt.Sprintf("%s.%d", commandName, idx) + if procedure != nil && procedure.Id != nil { + name = *procedure.Id + } + return name +} + func (p *Procedure) hasContainerFields() bool { return p.Image != "" || len(p.Command) > 0 || diff --git a/internal/core/ports/services_ports.go b/internal/core/ports/services_ports.go index c44fb376..f564db80 100644 --- a/internal/core/ports/services_ports.go +++ b/internal/core/ports/services_ports.go @@ -43,11 +43,32 @@ type RuntimeBackendInterface interface { Signal(commandName string, target string, signal string, dataRoot string) error } +type RuntimeLifecycleBackendInterface interface { + StopRuntime(dataRoot string) error + DeleteRuntime(dataRoot string, purgeData bool) error +} + +type RuntimeRoutingBackendInterface interface { + RoutingTargets(dataRoot string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimeRoutingTarget, error) +} + +type RuntimeBackupBackendInterface interface { + BackupRuntime(ctx context.Context, dataRoot string, artifact string) error + RestoreRuntime(ctx context.Context, dataRoot string, artifact string) error +} + +type RuntimeFileBackendInterface interface { + ReadDataFile(ctx context.Context, dataRoot string, relativePath string) ([]byte, error) + WriteDataFile(ctx context.Context, dataRoot string, relativePath string, data []byte) error +} + type RuntimeCommand struct { - Name string - Command *domain.CommandInstructionSet - DataRoot string - GlobalPorts []domain.Port + Name string + ScrollID string + Command *domain.CommandInstructionSet + DataRoot string + GlobalPorts []domain.Port + ProcedureEnv map[string]map[string]string } type RuntimeMaterialization struct { diff --git a/internal/core/services/procedure_launcher.go b/internal/core/services/procedure_launcher.go index 27d9b48b..90807818 100644 --- 
a/internal/core/services/procedure_launcher.go +++ b/internal/core/services/procedure_launcher.go @@ -11,28 +11,54 @@ import ( ) type ProcedureLauncher struct { - runtimeBackend ports.RuntimeBackendInterface - runtimeDataRoot string - scrollService ports.ScrollServiceInterface - procedures map[string]domain.ScrollLockStatus - proceduresMutex *sync.Mutex + runtimeBackend ports.RuntimeBackendInterface + runtimeDataRoot string + runtimeScrollID string + runtimeScrollName string + routingProvider func() []domain.RuntimeRouteAssignment + scrollService ports.ScrollServiceInterface + procedures map[string]domain.ScrollLockStatus + proceduresMutex *sync.Mutex } func NewProcedureLauncher( scrollService ports.ScrollServiceInterface, runtimeBackend ports.RuntimeBackendInterface, runtimeDataRoot string, +) (*ProcedureLauncher, error) { + return NewProcedureLauncherForScroll(scrollService, runtimeBackend, runtimeDataRoot, "") +} + +func NewProcedureLauncherForScroll( + scrollService ports.ScrollServiceInterface, + runtimeBackend ports.RuntimeBackendInterface, + runtimeDataRoot string, + runtimeScrollID string, +) (*ProcedureLauncher, error) { + return NewProcedureLauncherForRuntime(scrollService, runtimeBackend, runtimeDataRoot, runtimeScrollID, "", nil) +} + +func NewProcedureLauncherForRuntime( + scrollService ports.ScrollServiceInterface, + runtimeBackend ports.RuntimeBackendInterface, + runtimeDataRoot string, + runtimeScrollID string, + runtimeScrollName string, + routingProvider func() []domain.RuntimeRouteAssignment, ) (*ProcedureLauncher, error) { if runtimeBackend == nil { return nil, errors.New("runtime backend is required") } s := &ProcedureLauncher{ - runtimeBackend: runtimeBackend, - runtimeDataRoot: runtimeDataRoot, - scrollService: scrollService, - procedures: make(map[string]domain.ScrollLockStatus), - proceduresMutex: &sync.Mutex{}, + runtimeBackend: runtimeBackend, + runtimeDataRoot: runtimeDataRoot, + runtimeScrollID: runtimeScrollID, + runtimeScrollName: 
runtimeScrollName, + routingProvider: routingProvider, + scrollService: scrollService, + procedures: make(map[string]domain.ScrollLockStatus), + proceduresMutex: &sync.Mutex{}, } return s, nil @@ -66,12 +92,29 @@ func (sc *ProcedureLauncher) Run(cmd string) error { if dataRoot == "" { dataRoot = sc.scrollService.GetCwd() } + file := sc.scrollService.GetFile() + routing := []domain.RuntimeRouteAssignment{} + if sc.routingProvider != nil { + routing = sc.routingProvider() + } + procedureEnv, err := BuildRuntimeProcedureEnv(file, cmd, command, RuntimeEnvContext{ + ScrollID: sc.runtimeScrollID, + ScrollName: sc.runtimeScrollName, + Backend: sc.runtimeBackend.Name(), + Routing: routing, + }) + if err != nil { + sc.setProcedureStatus(cmd, domain.ScrollLockStatusError) + return err + } sc.setProcedureStatus(cmd, domain.ScrollLockStatusRunning) exitCode, err := sc.runtimeBackend.RunCommand(ports.RuntimeCommand{ - Name: cmd, - Command: command, - DataRoot: dataRoot, - GlobalPorts: sc.scrollService.GetFile().Ports, + Name: cmd, + ScrollID: sc.runtimeScrollID, + Command: command, + DataRoot: dataRoot, + GlobalPorts: file.Ports, + ProcedureEnv: procedureEnv, }) if err != nil { sc.setProcedureStatus(cmd, domain.ScrollLockStatusError) diff --git a/internal/core/services/procedure_launcher_test.go b/internal/core/services/procedure_launcher_test.go index 0a177125..66530384 100644 --- a/internal/core/services/procedure_launcher_test.go +++ b/internal/core/services/procedure_launcher_test.go @@ -26,6 +26,7 @@ func TestProcedureLauncherPassesCommandContextToRuntimeBackend(t *testing.T) { scrollService.EXPECT().GetCommand("serve").Return(command, nil) scrollService.EXPECT().GetFile().Return(file) + runtimeBackend.EXPECT().Name().Return("docker") runtimeBackend.EXPECT().RunCommand(gomock.Any()).DoAndReturn(func(runtimeCommand ports.RuntimeCommand) (*int, error) { if runtimeCommand.Name != "serve" { t.Fatalf("Name = %s, want serve", runtimeCommand.Name) @@ -50,3 +51,141 @@ func 
TestProcedureLauncherPassesCommandContextToRuntimeBackend(t *testing.T) { t.Fatal(err) } } + +func TestProcedureLauncherPassesScrollIDToRuntimeBackend(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) + runtimeBackend := mock_ports.NewMockRuntimeBackendInterface(ctrl) + command := &domain.CommandInstructionSet{ + Procedures: []*domain.Procedure{{Image: "alpine:3.20"}}, + } + scrollService.EXPECT().GetCommand("serve").Return(command, nil) + scrollService.EXPECT().GetFile().Return(&domain.File{}) + runtimeBackend.EXPECT().Name().Return("docker") + runtimeBackend.EXPECT().RunCommand(gomock.Any()).DoAndReturn(func(runtimeCommand ports.RuntimeCommand) (*int, error) { + if runtimeCommand.ScrollID != "scroll-a" { + t.Fatalf("ScrollID = %s, want scroll-a", runtimeCommand.ScrollID) + } + return nil, nil + }) + + launcher, err := services.NewProcedureLauncherForScroll(scrollService, runtimeBackend, "/runtime-data", "scroll-a") + if err != nil { + t.Fatal(err) + } + if err := launcher.Run("serve"); err != nil { + t.Fatal(err) + } +} + +func TestProcedureLauncherBuildsStableRuntimeEnv(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) + runtimeBackend := mock_ports.NewMockRuntimeBackendInterface(ctrl) + command := &domain.CommandInstructionSet{ + Procedures: []*domain.Procedure{{ + Id: ptrString("web"), + Image: "alpine:3.20", + Env: map[string]string{ + "APP_ENV": "test", + "DRUID_PORT_HTTP": "user-value", + "DRUID_SCROLL_NAME": "user-name", + }, + }}, + } + file := &domain.File{ + Name: "scroll-name", + Ports: []domain.Port{{ + Name: "http", + Port: 8080, + Protocol: "http", + }}, + } + + scrollService.EXPECT().GetCommand("serve").Return(command, nil) + scrollService.EXPECT().GetFile().Return(file) + runtimeBackend.EXPECT().Name().Return("kubernetes") + 
runtimeBackend.EXPECT().RunCommand(gomock.Any()).DoAndReturn(func(runtimeCommand ports.RuntimeCommand) (*int, error) { + env := runtimeCommand.ProcedureEnv["web"] + if env["APP_ENV"] != "test" { + t.Fatalf("APP_ENV = %q, want test", env["APP_ENV"]) + } + if env["DRUID_PORT_HTTP"] != "8080" || env["DRUID_PORT_HTTP_1"] != "8080" { + t.Fatalf("port env = %#v", env) + } + if env["DRUID_SCROLL_ID"] != "scroll-a" || env["DRUID_SCROLL_NAME"] != "scroll-name" { + t.Fatalf("scroll env = %#v", env) + } + if env["DRUID_RUNTIME_BACKEND"] != "kubernetes" { + t.Fatalf("backend env = %#v", env) + } + if env["DRUID_PORT_HTTP_PUBLIC"] != "443" || env["DRUID_PORT_HTTP_HOST"] != "web.example.test" || env["DRUID_PORT_HTTP_URL"] != "https://web.example.test" { + t.Fatalf("routing env = %#v", env) + } + if env["DRUID_IP"] != "192.0.2.10" || env["DRUID_PORT_HTTP_IP"] != "192.0.2.10" { + t.Fatalf("ip env = %#v", env) + } + if _, ok := env["DRUID_IP_WAIT"]; ok { + t.Fatalf("DRUID_IP_WAIT should not be set after routing: %#v", env) + } + return nil, nil + }) + + launcher, err := services.NewProcedureLauncherForRuntime(scrollService, runtimeBackend, "/runtime-data", "scroll-a", "", func() []domain.RuntimeRouteAssignment { + return []domain.RuntimeRouteAssignment{{ + Name: "web-http", + PortName: "http", + Host: "web.example.test", + ExternalIP: "192.0.2.10", + PublicPort: 443, + URL: "https://web.example.test", + }} + }) + if err != nil { + t.Fatal(err) + } + if err := launcher.Run("serve"); err != nil { + t.Fatal(err) + } +} + +func TestBuildRuntimeProcedureEnvSetsWaitBeforeRouting(t *testing.T) { + command := &domain.CommandInstructionSet{ + Procedures: []*domain.Procedure{{Image: "alpine:3.20"}}, + } + envs, err := services.BuildRuntimeProcedureEnv(&domain.File{ + Name: "scroll-name", + Ports: []domain.Port{{Name: "game-port", Port: 7777}}, + }, "serve", command, services.RuntimeEnvContext{ScrollID: "scroll-a", Backend: "docker"}) + if err != nil { + t.Fatal(err) + } + env := 
envs["serve.0"] + if env["DRUID_IP_WAIT"] != "true" { + t.Fatalf("env = %#v, want DRUID_IP_WAIT", env) + } + if env["DRUID_PORT_GAME_PORT"] != "7777" { + t.Fatalf("env = %#v, want normalized port env", env) + } +} + +func TestBuildRuntimeProcedureEnvRejectsDuplicateNormalizedPortNames(t *testing.T) { + _, err := services.BuildRuntimeProcedureEnv(&domain.File{ + Name: "scroll-name", + Ports: []domain.Port{ + {Name: "web-port", Port: 8080}, + {Name: "web_port", Port: 8081}, + }, + }, "serve", &domain.CommandInstructionSet{Procedures: []*domain.Procedure{{Image: "alpine:3.20"}}}, services.RuntimeEnvContext{}) + if err == nil { + t.Fatal("expected duplicate normalized port names to fail") + } +} + +func ptrString(value string) *string { + return &value +} diff --git a/internal/core/services/queue_manager.go b/internal/core/services/queue_manager.go index 6f908982..77cd5653 100644 --- a/internal/core/services/queue_manager.go +++ b/internal/core/services/queue_manager.go @@ -292,6 +292,9 @@ func (sc *QueueManager) RunQueue() { zap.String("command", cmd), zap.Error(err), ) + sc.mu.Lock() + delete(sc.commandQueue, cmd) + sc.mu.Unlock() continue } diff --git a/internal/core/services/queue_manager_test.go b/internal/core/services/queue_manager_test.go index dc3e3149..17d079bb 100644 --- a/internal/core/services/queue_manager_test.go +++ b/internal/core/services/queue_manager_test.go @@ -62,6 +62,7 @@ func TestQueueManager(t *testing.T) { queueManager := services.NewQueueManager(scrollService, procedureLauncher) exitCode := 0 + runtimeBackend.EXPECT().Name().Return("docker").AnyTimes() runtimeBackend.EXPECT().RunCommand(gomock.Any()).Return(&exitCode, nil).Times(testCase.AccualExecution) scrollService.EXPECT().GetCommand("test").Return(&domain.CommandInstructionSet{ @@ -159,6 +160,7 @@ func TestQueueManager(t *testing.T) { queueManager := services.NewQueueManager(scrollService, procedureLauncher) exitCode := 0 + runtimeBackend.EXPECT().Name().Return("docker").AnyTimes() 
runtimeBackend.EXPECT().RunCommand(gomock.Any()).Return(&exitCode, nil).Times(4) scrollService.EXPECT().GetCommand("test").Return(&domain.CommandInstructionSet{ diff --git a/internal/core/services/runtime_env.go b/internal/core/services/runtime_env.go new file mode 100644 index 00000000..f0c38989 --- /dev/null +++ b/internal/core/services/runtime_env.go @@ -0,0 +1,130 @@ +package services + +import ( + "fmt" + "strconv" + "strings" + + "github.com/highcard-dev/daemon/internal/core/domain" +) + +type RuntimeEnvContext struct { + ScrollID string + ScrollName string + Backend string + Routing []domain.RuntimeRouteAssignment +} + +func BuildRuntimeProcedureEnv(file *domain.File, commandName string, command *domain.CommandInstructionSet, context RuntimeEnvContext) (map[string]map[string]string, error) { + if file == nil { + return nil, fmt.Errorf("scroll file is required") + } + if command == nil { + return nil, fmt.Errorf("command is required") + } + base, err := runtimeEnv(file, context) + if err != nil { + return nil, err + } + result := make(map[string]map[string]string, len(command.Procedures)) + for idx, procedure := range command.Procedures { + if procedure == nil { + continue + } + env := map[string]string{} + for key, value := range procedure.Env { + env[key] = value + } + for key, value := range base { + env[key] = value + } + result[domain.ProcedureName(commandName, idx, procedure)] = env + } + return result, nil +} + +func runtimeEnv(file *domain.File, context RuntimeEnvContext) (map[string]string, error) { + env := map[string]string{} + if context.ScrollID != "" { + env["DRUID_SCROLL_ID"] = context.ScrollID + } + scrollName := context.ScrollName + if scrollName == "" { + scrollName = file.Name + } + if scrollName != "" { + env["DRUID_SCROLL_NAME"] = scrollName + } + if context.Backend != "" { + env["DRUID_RUNTIME_BACKEND"] = context.Backend + } + + seen := map[string]string{} + for _, port := range file.Ports { + suffix := envSuffix(port.Name) + if suffix 
== "" { + return nil, fmt.Errorf("port name is required for runtime env") + } + if previous := seen[suffix]; previous != "" { + return nil, fmt.Errorf("port names %q and %q normalize to the same env name", previous, port.Name) + } + seen[suffix] = port.Name + env["DRUID_PORT_"+suffix] = strconv.Itoa(port.Port) + env["DRUID_PORT_"+suffix+"_1"] = strconv.Itoa(port.Port) + if port.Protocol != "" { + env["DRUID_PORT_"+suffix+"_PROTOCOL"] = port.Protocol + } + } + + if len(context.Routing) == 0 { + env["DRUID_IP_WAIT"] = "true" + return env, nil + } + for _, assignment := range context.Routing { + portName := assignment.PortName + if portName == "" { + portName = assignment.Name + } + suffix := envSuffix(portName) + if suffix == "" { + continue + } + if assignment.ExternalIP != "" { + env["DRUID_PORT_"+suffix+"_IP"] = assignment.ExternalIP + if env["DRUID_IP"] == "" { + env["DRUID_IP"] = assignment.ExternalIP + } + } + if assignment.PublicPort > 0 { + env["DRUID_PORT_"+suffix+"_PUBLIC"] = strconv.Itoa(assignment.PublicPort) + } + if assignment.Host != "" { + env["DRUID_PORT_"+suffix+"_HOST"] = assignment.Host + } + if assignment.URL != "" { + env["DRUID_PORT_"+suffix+"_URL"] = assignment.URL + } + } + return env, nil +} + +func envSuffix(name string) string { + var b strings.Builder + lastUnderscore := false + for _, r := range name { + switch { + case r >= 'a' && r <= 'z': + b.WriteRune(r - ('a' - 'A')) + lastUnderscore = false + case r >= 'A' && r <= 'Z', r >= '0' && r <= '9': + b.WriteRune(r) + lastUnderscore = false + default: + if !lastUnderscore && b.Len() > 0 { + b.WriteByte('_') + lastUnderscore = true + } + } + } + return strings.TrimRight(b.String(), "_") +} diff --git a/internal/core/services/runtime_scroll_manager.go b/internal/core/services/runtime_scroll_manager.go index e5b331f3..7850ab0e 100644 --- a/internal/core/services/runtime_scroll_manager.go +++ b/internal/core/services/runtime_scroll_manager.go @@ -124,6 +124,9 @@ func 
MaterializeScrollArtifact(artifact string, scrollRoot string, dataRoot stri if err := materializeLocalArtifact(artifact, scrollRoot); err != nil { return err } + if scrollRoot == dataRoot { + return os.MkdirAll(filepath.Join(dataRoot, domain.RuntimeDataDir), 0755) + } return moveRuntimeData(scrollRoot, dataRoot) } if ociRegistry == nil { @@ -133,6 +136,9 @@ func MaterializeScrollArtifact(artifact string, scrollRoot string, dataRoot stri return err } if includeData { + if scrollRoot == dataRoot { + return os.MkdirAll(filepath.Join(dataRoot, domain.RuntimeDataDir), 0755) + } return moveRuntimeData(scrollRoot, dataRoot) } return os.MkdirAll(filepath.Join(dataRoot, domain.RuntimeDataDir), 0755) @@ -160,6 +166,23 @@ func moveRuntimeData(scrollRoot string, dataRoot string) error { } func MoveMaterializedScroll(srcScrollRoot string, srcDataRoot string, dstScrollRoot string, dstDataRoot string) error { + if srcScrollRoot == srcDataRoot && dstScrollRoot == dstDataRoot { + if localPathExists(dstScrollRoot) { + return fmt.Errorf("target scroll root already exists: %s", dstScrollRoot) + } + if err := os.MkdirAll(filepath.Dir(dstScrollRoot), 0755); err != nil { + return err + } + if err := os.Rename(srcScrollRoot, dstScrollRoot); err != nil { + if err := copyDir(srcScrollRoot, dstScrollRoot); err != nil { + return err + } + if err := os.RemoveAll(srcScrollRoot); err != nil { + return err + } + } + return nil + } if localPathExists(dstScrollRoot) { return fmt.Errorf("target scroll root already exists: %s", dstScrollRoot) } diff --git a/internal/core/services/runtime_scroll_manager_test.go b/internal/core/services/runtime_scroll_manager_test.go index 6cb87a71..18aa725f 100644 --- a/internal/core/services/runtime_scroll_manager_test.go +++ b/internal/core/services/runtime_scroll_manager_test.go @@ -2,6 +2,7 @@ package services import ( "errors" + "os" "path/filepath" "testing" ) @@ -53,3 +54,39 @@ func TestRuntimeScrollManagerCreateFailsDuplicateID(t *testing.T) { t.Fatalf("error 
= %v, want ErrScrollAlreadyExists", err) } } + +func TestRuntimeStateStoreUsesSingleRuntimeRoot(t *testing.T) { + store := NewRuntimeStateStore(t.TempDir()) + if got, want := store.DataRoot("scroll-a"), store.ScrollRoot("scroll-a"); got != want { + t.Fatalf("DataRoot = %s, want %s", got, want) + } +} + +func TestMaterializeScrollArtifactKeepsScrollYamlNextToData(t *testing.T) { + artifact := t.TempDir() + if err := os.WriteFile(filepath.Join(artifact, "scroll.yaml"), []byte(testScrollYAML), 0644); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(filepath.Join(artifact, "data", "private"), 0755); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(artifact, "data", "private", "state.txt"), []byte("ok"), 0644); err != nil { + t.Fatal(err) + } + root := t.TempDir() + + if err := MaterializeScrollArtifact(artifact, root, root, nil, true); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(filepath.Join(root, "scroll.yaml")); err != nil { + t.Fatalf("scroll.yaml not materialized next to data: %v", err) + } + got, err := os.ReadFile(filepath.Join(root, "data", "private", "state.txt")) + if err != nil { + t.Fatal(err) + } + if string(got) != "ok" { + t.Fatalf("state = %q, want ok", got) + } +} diff --git a/internal/core/services/runtime_state_store.go b/internal/core/services/runtime_state_store.go index 1d62e5f4..d690ede0 100644 --- a/internal/core/services/runtime_state_store.go +++ b/internal/core/services/runtime_state_store.go @@ -43,11 +43,11 @@ func (s *RuntimeStateStore) StateDir() string { } func (s *RuntimeStateStore) ScrollRoot(id string) string { - return filepath.Join(s.stateDir, "scrolls", id, "spec") + return filepath.Join(s.stateDir, "scrolls", id) } func (s *RuntimeStateStore) DataRoot(id string) string { - return filepath.Join(s.stateDir, "data", id) + return s.ScrollRoot(id) } func (s *RuntimeStateStore) CreateScroll(scroll *domain.RuntimeScroll) error { @@ -70,11 +70,15 @@ func (s *RuntimeStateStore) CreateScroll(scroll 
*domain.RuntimeScroll) error { if err != nil { return err } + routing, err := json.Marshal(scroll.Routing) + if err != nil { + return err + } _, err = db.Exec(` - INSERT INTO scrolls (id, owner_id, artifact, scroll_root, data_root, scroll_name, scroll_yaml, status, created_at, updated_at, commands_json) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) - `, scroll.ID, scroll.OwnerID, scroll.Artifact, scroll.ScrollRoot, scroll.DataRoot, scroll.ScrollName, scroll.ScrollYAML, scroll.Status, formatTime(scroll.CreatedAt), formatTime(scroll.UpdatedAt), string(commands)) + INSERT INTO scrolls (id, owner_id, artifact, scroll_root, data_root, scroll_name, scroll_yaml, status, last_error, created_at, updated_at, commands_json, routing_json) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + `, scroll.ID, scroll.OwnerID, scroll.Artifact, scroll.ScrollRoot, scroll.DataRoot, scroll.ScrollName, scroll.ScrollYAML, scroll.Status, scroll.LastError, formatTime(scroll.CreatedAt), formatTime(scroll.UpdatedAt), string(commands), string(routing)) if err != nil { return fmt.Errorf("create runtime scroll %s: %w", scroll.ID, err) } @@ -89,7 +93,7 @@ func (s *RuntimeStateStore) ListScrolls() ([]*domain.RuntimeScroll, error) { defer db.Close() rows, err := db.Query(` - SELECT id, owner_id, artifact, scroll_root, data_root, scroll_name, scroll_yaml, status, created_at, updated_at, commands_json + SELECT id, owner_id, artifact, scroll_root, data_root, scroll_name, scroll_yaml, status, last_error, created_at, updated_at, commands_json, routing_json FROM scrolls ORDER BY id `) @@ -117,7 +121,7 @@ func (s *RuntimeStateStore) GetScroll(id string) (*domain.RuntimeScroll, error) defer db.Close() row := db.QueryRow(` - SELECT id, owner_id, artifact, scroll_root, data_root, scroll_name, scroll_yaml, status, created_at, updated_at, commands_json + SELECT id, owner_id, artifact, scroll_root, data_root, scroll_name, scroll_yaml, status, last_error, created_at, updated_at, commands_json, routing_json FROM 
scrolls WHERE id = ? `, id) @@ -140,11 +144,15 @@ func (s *RuntimeStateStore) UpdateScroll(scroll *domain.RuntimeScroll) error { if err != nil { return err } + routing, err := json.Marshal(scroll.Routing) + if err != nil { + return err + } res, err := db.Exec(` UPDATE scrolls - SET owner_id = ?, artifact = ?, scroll_root = ?, data_root = ?, scroll_name = ?, scroll_yaml = ?, status = ?, updated_at = ?, commands_json = ? + SET owner_id = ?, artifact = ?, scroll_root = ?, data_root = ?, scroll_name = ?, scroll_yaml = ?, status = ?, last_error = ?, updated_at = ?, commands_json = ?, routing_json = ? WHERE id = ? - `, scroll.OwnerID, scroll.Artifact, scroll.ScrollRoot, scroll.DataRoot, scroll.ScrollName, scroll.ScrollYAML, scroll.Status, formatTime(scroll.UpdatedAt), string(commands), scroll.ID) + `, scroll.OwnerID, scroll.Artifact, scroll.ScrollRoot, scroll.DataRoot, scroll.ScrollName, scroll.ScrollYAML, scroll.Status, scroll.LastError, formatTime(scroll.UpdatedAt), string(commands), string(routing), scroll.ID) if err != nil { return err } @@ -201,9 +209,11 @@ func (s *RuntimeStateStore) open() (*sql.DB, error) { scroll_name TEXT NOT NULL, scroll_yaml TEXT NOT NULL DEFAULT '', status TEXT NOT NULL, + last_error TEXT NOT NULL DEFAULT '', created_at TEXT NOT NULL, updated_at TEXT NOT NULL, - commands_json TEXT NOT NULL DEFAULT '{}' + commands_json TEXT NOT NULL DEFAULT '{}', + routing_json TEXT NOT NULL DEFAULT '[]' ) `); err != nil { db.Close() @@ -217,6 +227,14 @@ func (s *RuntimeStateStore) open() (*sql.DB, error) { db.Close() return nil, err } + if err := ensureColumn(db, "scrolls", "last_error", "TEXT NOT NULL DEFAULT ''"); err != nil { + db.Close() + return nil, err + } + if err := ensureColumn(db, "scrolls", "routing_json", "TEXT NOT NULL DEFAULT '[]'"); err != nil { + db.Close() + return nil, err + } if err := removeRuntimeColumn(db); err != nil { db.Close() return nil, err @@ -239,9 +257,11 @@ func removeRuntimeColumn(db *sql.DB) error { scroll_name TEXT NOT 
NULL, scroll_yaml TEXT NOT NULL DEFAULT '', status TEXT NOT NULL, + last_error TEXT NOT NULL DEFAULT '', created_at TEXT NOT NULL, updated_at TEXT NOT NULL, - commands_json TEXT NOT NULL DEFAULT '{}' + commands_json TEXT NOT NULL DEFAULT '{}', + routing_json TEXT NOT NULL DEFAULT '[]' ) `); err != nil { return err @@ -304,13 +324,16 @@ type runtimeScrollScanner interface { func scanRuntimeScroll(scanner runtimeScrollScanner) (*domain.RuntimeScroll, error) { var scroll domain.RuntimeScroll var status string + var lastError string var createdAt string var updatedAt string var commandsJSON string - if err := scanner.Scan(&scroll.ID, &scroll.OwnerID, &scroll.Artifact, &scroll.ScrollRoot, &scroll.DataRoot, &scroll.ScrollName, &scroll.ScrollYAML, &status, &createdAt, &updatedAt, &commandsJSON); err != nil { + var routingJSON string + if err := scanner.Scan(&scroll.ID, &scroll.OwnerID, &scroll.Artifact, &scroll.ScrollRoot, &scroll.DataRoot, &scroll.ScrollName, &scroll.ScrollYAML, &status, &lastError, &createdAt, &updatedAt, &commandsJSON, &routingJSON); err != nil { return nil, err } scroll.Status = domain.RuntimeScrollStatus(status) + scroll.LastError = lastError scroll.CreatedAt = parseTime(createdAt) scroll.UpdatedAt = parseTime(updatedAt) if commandsJSON == "" { @@ -322,6 +345,12 @@ func scanRuntimeScroll(scanner runtimeScrollScanner) (*domain.RuntimeScroll, err if scroll.Commands == nil { scroll.Commands = map[string]domain.LockStatus{} } + if routingJSON == "" { + routingJSON = "[]" + } + if err := json.Unmarshal([]byte(routingJSON), &scroll.Routing); err != nil { + return nil, err + } return &scroll, nil } diff --git a/internal/runtime/docker/backend.go b/internal/runtime/docker/backend.go index cd39de27..7824b983 100644 --- a/internal/runtime/docker/backend.go +++ b/internal/runtime/docker/backend.go @@ -18,6 +18,7 @@ import ( "time" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" 
"github.com/docker/docker/api/types/image" "github.com/docker/docker/client" "github.com/docker/docker/pkg/stdcopy" @@ -58,9 +59,40 @@ func (b *Backend) ReadScrollFile(scrollRoot string) ([]byte, error) { return os.ReadFile(filepath.Join(scrollRoot, "scroll.yaml")) } +func (b *Backend) ReadDataFile(_ context.Context, dataRoot string, relativePath string) ([]byte, error) { + filePath, err := dataFilePath(dataRoot, relativePath) + if err != nil { + return nil, err + } + return os.ReadFile(filePath) +} + +func (b *Backend) WriteDataFile(_ context.Context, dataRoot string, relativePath string, data []byte) error { + filePath, err := dataFilePath(dataRoot, relativePath) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(filePath), 0755); err != nil { + return err + } + return os.WriteFile(filePath, data, 0644) +} + +func dataFilePath(dataRoot string, relativePath string) (string, error) { + cleaned := filepath.Clean(strings.TrimPrefix(relativePath, "/")) + if cleaned == "." || cleaned == ".." 
|| filepath.IsAbs(cleaned) || strings.HasPrefix(cleaned, ".."+string(os.PathSeparator)) { + return "", fmt.Errorf("invalid data file path %q", relativePath) + } + return filepath.Join(dataRoot, cleaned), nil +} + func (b *Backend) RunCommand(command ports.RuntimeCommand) (*int, error) { for idx, procedure := range command.Command.Procedures { - procedureName := commandProcedureName(command.Name, idx, procedure) + procedureName := domain.ProcedureName(command.Name, idx, procedure) + env := command.ProcedureEnv[procedureName] + if env == nil { + env = procedure.Env + } if command.Command.Run == domain.RunModePersistent { if procedure.IsSignal() { if err := b.Signal(procedureName, procedure.Target, procedure.Signal, command.DataRoot); err != nil { @@ -71,12 +103,12 @@ func (b *Backend) RunCommand(command ports.RuntimeCommand) (*int, error) { if procedure.Image == "" { return nil, fmt.Errorf("docker runtime procedure %s requires image", procedureName) } - if err := b.startPersistentContainer(procedureName, procedure, command.DataRoot, command.GlobalPorts); err != nil { + if err := b.startPersistentContainer(runtimeConsoleID(command.ScrollID, procedureName), procedureName, procedure, command.DataRoot, command.GlobalPorts, env); err != nil { return nil, err } continue } - exitCode, err := b.runProcedure(procedureName, procedure, command.DataRoot, command.GlobalPorts) + exitCode, err := b.runProcedure(runtimeConsoleID(command.ScrollID, procedureName), procedureName, procedure, command.DataRoot, command.GlobalPorts, env) if err != nil { return exitCode, err } @@ -90,14 +122,14 @@ func (b *Backend) RunCommand(command ports.RuntimeCommand) (*int, error) { return nil, nil } -func (b *Backend) runProcedure(procedureName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port) (*int, error) { +func (b *Backend) runProcedure(consoleID string, procedureName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port, env 
map[string]string) (*int, error) { if procedure.IsSignal() { return nil, b.Signal(procedureName, procedure.Target, procedure.Signal, dataRoot) } if procedure.Image == "" { return nil, fmt.Errorf("docker runtime procedure %s requires image", procedureName) } - return b.runContainer(procedureName, procedure, dataRoot, globalPorts) + return b.runContainer(consoleID, procedureName, procedure, dataRoot, globalPorts, env) } func (b *Backend) ExpectedPorts(dataRoot string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimePortStatus, error) { @@ -155,7 +187,45 @@ func (b *Backend) Signal(_ string, target string, signal string, dataRoot string return b.client.ContainerStop(ctx, containerID, options) } -func (b *Backend) runContainer(commandName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port) (*int, error) { +func (b *Backend) StopRuntime(dataRoot string) error { + if dataRoot == "" { + return fmt.Errorf("data root is required") + } + ctx := context.Background() + items, err := b.client.ContainerList(ctx, container.ListOptions{ + All: true, + Filters: filters.NewArgs(filters.Arg("label", "druid.data-root-hash="+dataRootHash(dataRoot))), + }) + if err != nil { + return err + } + for _, item := range items { + if err := b.client.ContainerRemove(ctx, item.ID, container.RemoveOptions{Force: true}); err != nil { + return err + } + } + b.mu.Lock() + for key := range b.containers { + delete(b.containers, key) + } + for key := range b.stdin { + delete(b.stdin, key) + } + b.mu.Unlock() + return nil +} + +func (b *Backend) DeleteRuntime(dataRoot string, purgeData bool) error { + if err := b.StopRuntime(dataRoot); err != nil { + return err + } + if purgeData { + return os.RemoveAll(dataRoot) + } + return nil +} + +func (b *Backend) runContainer(consoleID string, commandName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port, env map[string]string) (*int, error) { ctx := 
context.Background() if err := os.MkdirAll(filepath.Join(dataRoot, domain.RuntimeDataDir), 0755); err != nil { return nil, err @@ -168,7 +238,7 @@ func (b *Backend) runContainer(commandName string, procedure *domain.Procedure, return nil, err } - config, hostConfig, err := containerSpec(commandName, procedure, dataRoot, globalPorts) + config, hostConfig, err := containerSpec(commandName, procedure, dataRoot, globalPorts, env) if err != nil { return nil, err } @@ -203,7 +273,7 @@ func (b *Backend) runContainer(commandName string, procedure *domain.Procedure, if procedure.TTY { consoleType = domain.ConsoleTypeTTY } - console, doneChan := b.consoleManager.AddConsoleWithChannel(commandName, consoleType, "stdin", combined) + console, doneChan := b.consoleManager.AddConsoleWithChannel(consoleID, consoleType, "stdin", combined) console.WriteInput = func(data string) error { return b.Attach(commandName, data) } @@ -242,7 +312,7 @@ func (b *Backend) runContainer(commandName string, procedure *domain.Procedure, return &exitCode, nil } -func (b *Backend) startPersistentContainer(commandName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port) error { +func (b *Backend) startPersistentContainer(consoleID string, commandName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port, env map[string]string) error { ctx := context.Background() if err := os.MkdirAll(filepath.Join(dataRoot, domain.RuntimeDataDir), 0755); err != nil { return err @@ -253,7 +323,7 @@ func (b *Backend) startPersistentContainer(commandName string, procedure *domain if err := b.pullImage(ctx, procedure.Image); err != nil { return err } - config, hostConfig, err := containerSpec(commandName, procedure, dataRoot, globalPorts) + config, hostConfig, err := containerSpec(commandName, procedure, dataRoot, globalPorts, env) if err != nil { return err } @@ -281,7 +351,7 @@ func (b *Backend) startPersistentContainer(commandName string, procedure *domain if 
procedure.TTY { consoleType = domain.ConsoleTypeTTY } - console, _ := b.consoleManager.AddConsoleWithChannel(commandName, consoleType, "stdin", combined) + console, _ := b.consoleManager.AddConsoleWithChannel(consoleID, consoleType, "stdin", combined) console.WriteInput = func(data string) error { return b.Attach(commandName, data) } @@ -373,7 +443,7 @@ func (w channelWriter) Write(p []byte) (int, error) { return len(p), nil } -func containerSpec(commandName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port) (*container.Config, *container.HostConfig, error) { +func containerSpec(commandName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port, env map[string]string) (*container.Config, *container.HostConfig, error) { if procedure.Image == "" { return nil, nil, errors.New("docker image is required") } @@ -422,7 +492,7 @@ func containerSpec(commandName string, procedure *domain.Procedure, dataRoot str Image: procedure.Image, Cmd: procedure.Command, WorkingDir: procedure.WorkingDir, - Env: envArgs(procedure.Env), + Env: envArgs(env), ExposedPorts: exposedPorts, AttachStdin: true, AttachStdout: true, @@ -430,7 +500,8 @@ func containerSpec(commandName string, procedure *domain.Procedure, dataRoot str OpenStdin: true, Tty: procedure.TTY, Labels: map[string]string{ - "druid.command": commandName, + "druid.command": commandName, + "druid.data-root-hash": dataRootHash(dataRoot), }, }, &container.HostConfig{ Binds: binds, @@ -444,6 +515,11 @@ func ContainerName(scrollRoot string, commandName string) string { return fmt.Sprintf("druid-%s-%s", hex.EncodeToString(hash[:])[:10], name) } +func dataRootHash(dataRoot string) string { + hash := sha1.Sum([]byte(dataRoot)) + return hex.EncodeToString(hash[:])[:10] +} + func commandProcedureName(commandName string, idx int, procedure *domain.Procedure) string { procedureName := fmt.Sprintf("%s.%d", commandName, idx) if procedure != nil && procedure.Id != nil { @@ -452,6 +528,13 
@@ func commandProcedureName(commandName string, idx int, procedure *domain.Procedu return procedureName } +func runtimeConsoleID(scrollID string, procedureName string) string { + if scrollID == "" { + return procedureName + } + return scrollID + "/" + procedureName +} + func sanitizeContainerName(name string) string { re := regexp.MustCompile(`[^a-zA-Z0-9_.-]+`) name = re.ReplaceAllString(name, "-") @@ -489,7 +572,11 @@ type ContainerSpec struct { } func BuildContainerSpec(commandName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port) (*ContainerSpec, error) { - config, hostConfig, err := containerSpec(commandName, procedure, dataRoot, globalPorts) + return BuildContainerSpecWithEnv(commandName, procedure, dataRoot, globalPorts, procedure.Env) +} + +func BuildContainerSpecWithEnv(commandName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port, env map[string]string) (*ContainerSpec, error) { + config, hostConfig, err := containerSpec(commandName, procedure, dataRoot, globalPorts, env) if err != nil { return nil, err } diff --git a/internal/runtime/kubernetes/backend.go b/internal/runtime/kubernetes/backend.go index 86632b08..69d471ff 100644 --- a/internal/runtime/kubernetes/backend.go +++ b/internal/runtime/kubernetes/backend.go @@ -3,9 +3,11 @@ package kubernetes import ( "bufio" "context" + "encoding/base64" "errors" "fmt" "io" + "path" "strings" "time" @@ -179,9 +181,47 @@ func (b *Backend) ReadScrollFile(scrollRoot string) ([]byte, error) { return b.runJobAndLogs(context.Background(), job) } +func (b *Backend) ReadDataFile(ctx context.Context, dataRoot string, relativePath string) ([]byte, error) { + cleaned, err := cleanDataPath(relativePath) + if err != nil { + return nil, err + } + namespace, pvc, err := parseRef(dataRoot) + if err != nil { + return nil, err + } + job := readDataFileJobSpec(namespace, jobName("read-file", dataRoot, shortHash(cleaned)), pvc, b.config.HelperImage, cleaned) + return 
b.runJobAndLogs(ctx, job) +} + +func (b *Backend) WriteDataFile(ctx context.Context, dataRoot string, relativePath string, data []byte) error { + cleaned, err := cleanDataPath(relativePath) + if err != nil { + return err + } + namespace, pvc, err := parseRef(dataRoot) + if err != nil { + return err + } + job := writeDataFileJobSpec(namespace, jobName("write-file", dataRoot, shortHash(cleaned)), pvc, b.config.HelperImage, cleaned, base64.StdEncoding.EncodeToString(data)) + return b.runHelperJob(ctx, job) +} + +func cleanDataPath(relativePath string) (string, error) { + cleaned := path.Clean(strings.TrimPrefix(relativePath, "/")) + if cleaned == "." || cleaned == ".." || strings.HasPrefix(cleaned, "../") { + return "", fmt.Errorf("invalid data file path %q", relativePath) + } + return cleaned, nil +} + func (b *Backend) RunCommand(command ports.RuntimeCommand) (*int, error) { for idx, procedure := range command.Command.Procedures { - procedureName := commandProcedureName(command.Name, idx, procedure) + procedureName := domain.ProcedureName(command.Name, idx, procedure) + env := command.ProcedureEnv[procedureName] + if env == nil { + env = procedure.Env + } if command.Command.Run == domain.RunModePersistent { if procedure.IsSignal() { if err := b.Signal(procedureName, procedure.Target, procedure.Signal, command.DataRoot); err != nil { @@ -192,12 +232,12 @@ func (b *Backend) RunCommand(command ports.RuntimeCommand) (*int, error) { if procedure.Image == "" { return nil, fmt.Errorf("kubernetes procedure %s requires image", procedureName) } - if err := b.ensurePersistentProcedure(context.Background(), command.DataRoot, procedureName, procedure, command.GlobalPorts); err != nil { + if err := b.ensurePersistentProcedure(context.Background(), command.ScrollID, command.DataRoot, procedureName, procedure, command.GlobalPorts, env); err != nil { return nil, err } continue } - exitCode, err := b.runJobProcedure(procedureName, procedure, command.DataRoot, command.GlobalPorts) + 
exitCode, err := b.runJobProcedure(command.ScrollID, procedureName, procedure, command.DataRoot, command.GlobalPorts, env) if err != nil { return exitCode, err } @@ -211,7 +251,7 @@ func (b *Backend) RunCommand(command ports.RuntimeCommand) (*int, error) { return nil, nil } -func (b *Backend) runJobProcedure(procedureName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port) (*int, error) { +func (b *Backend) runJobProcedure(scrollID string, procedureName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port, env map[string]string) (*int, error) { if procedure.IsSignal() { return nil, b.Signal(procedureName, procedure.Target, procedure.Signal, dataRoot) } @@ -222,21 +262,22 @@ func (b *Backend) runJobProcedure(procedureName string, procedure *domain.Proced if err := b.ensureExpectedServices(ctx, dataRoot, procedureName, procedure, globalPorts); err != nil { return nil, err } - job, err := procedureJobSpec(b.config.Namespace, dataRoot, procedureName, procedure, b.config.RegistrySecret) + job, err := procedureJobSpec(b.config.Namespace, dataRoot, procedureName, procedure, env, b.config.RegistrySecret) if err != nil { return nil, err } - _ = b.client.BatchV1().Jobs(b.config.Namespace).Delete(ctx, job.Name, metav1.DeleteOptions{}) - if _, err := b.client.BatchV1().Jobs(b.config.Namespace).Create(ctx, job, metav1.CreateOptions{}); err != nil { + createdJob, err := b.createFreshJob(ctx, job) + if err != nil { return nil, err } output := make(chan string, 100) - console, doneChan := b.consoleManager.AddConsoleWithChannel(procedureName, domain.ConsoleTypeContainer, "stdin", output) + consoleID := runtimeConsoleID(scrollID, procedureName) + console, doneChan := b.consoleManager.AddConsoleWithChannel(consoleID, domain.ConsoleTypeContainer, "stdin", output) console.WriteInput = func(data string) error { return b.Attach(procedureName, data) } streamStarted := false - podName, err := b.waitForJobPod(ctx, job.Name) + podName, 
err := b.waitForJobPod(ctx, job.Name, string(createdJob.UID)) if err == nil { streamStarted = true go b.streamPodLogs(ctx, podName, output) @@ -255,11 +296,11 @@ func (b *Backend) runJobProcedure(procedureName string, procedure *domain.Proced return exitCode, nil } -func (b *Backend) ensurePersistentProcedure(ctx context.Context, dataRoot string, procedureName string, procedure *domain.Procedure, globalPorts []domain.Port) error { +func (b *Backend) ensurePersistentProcedure(ctx context.Context, scrollID string, dataRoot string, procedureName string, procedure *domain.Procedure, globalPorts []domain.Port, env map[string]string) error { if err := b.ensureExpectedServices(ctx, dataRoot, procedureName, procedure, globalPorts); err != nil { return err } - statefulSet, err := procedureStatefulSetSpec(b.config.Namespace, dataRoot, procedureName, procedure, b.config.RegistrySecret) + statefulSet, err := procedureStatefulSetSpec(b.config.Namespace, dataRoot, procedureName, procedure, env, b.config.RegistrySecret) if err != nil { return err } @@ -278,7 +319,7 @@ func (b *Backend) ensurePersistentProcedure(ctx context.Context, dataRoot string } } output := make(chan string, 100) - console, _ := b.consoleManager.AddConsoleWithChannel(procedureName, domain.ConsoleTypeContainer, "stdin", output) + console, _ := b.consoleManager.AddConsoleWithChannel(runtimeConsoleID(scrollID, procedureName), domain.ConsoleTypeContainer, "stdin", output) console.WriteInput = func(data string) error { return b.Attach(procedureName, data) } @@ -382,6 +423,125 @@ func (b *Backend) ExpectedPorts(dataRoot string, commands map[string]*domain.Com return statuses, nil } +func (b *Backend) RoutingTargets(dataRoot string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimeRoutingTarget, error) { + namespace, pvc, err := parseRef(dataRoot) + if err != nil { + return nil, err + } + portsByName := portsByName(globalPorts) + targets := 
[]domain.RuntimeRoutingTarget{} + for commandName, command := range commands { + if command == nil { + continue + } + for idx, procedure := range command.Procedures { + if procedure == nil || len(procedure.ExpectedPorts) == 0 { + continue + } + procedureName := domain.ProcedureName(commandName, idx, procedure) + for _, expectedPort := range procedure.ExpectedPorts { + port, ok := portsByName[expectedPort.Name] + if !ok { + return nil, fmt.Errorf("expected port %s is not defined in top-level ports", expectedPort.Name) + } + targets = append(targets, domain.RuntimeRoutingTarget{ + Name: fmt.Sprintf("%s-%s", procedureName, expectedPort.Name), + Procedure: procedureName, + PortName: expectedPort.Name, + Port: port.Port, + Protocol: normalizeProtocol(port.Protocol), + Namespace: namespace, + ServiceName: serviceName(dataRoot, procedureName, expectedPort.Name), + ServicePort: port.Port, + Selector: map[string]string{ + labelManagedBy: "druid", + labelComponent: "runtime", + labelScrollID: dnsLabel(pvc), + labelProcedure: dnsLabel(procedureName), + }, + }) + } + } + } + return targets, nil +} + +func (b *Backend) StopRuntime(dataRoot string) error { + propagation := metav1.DeletePropagationBackground + options := metav1.DeleteOptions{PropagationPolicy: &propagation} + if err := b.deleteRuntimeJobs(context.Background(), dataRoot, options); err != nil { + return err + } + if err := b.deleteRuntimeStatefulSets(context.Background(), dataRoot, options); err != nil { + return err + } + return b.deleteRuntimePodsByScroll(context.Background(), dataRoot, options) +} + +func (b *Backend) DeleteRuntime(dataRoot string, purgeData bool) error { + propagation := metav1.DeletePropagationBackground + options := metav1.DeleteOptions{PropagationPolicy: &propagation} + if err := b.StopRuntime(dataRoot); err != nil { + return err + } + if err := b.deleteRuntimeServices(context.Background(), dataRoot, options); err != nil { + return err + } + if purgeData { + namespace, pvc, err := 
parseRef(dataRoot) + if err != nil { + return err + } + err = b.client.CoreV1().PersistentVolumeClaims(namespace).Delete(context.Background(), pvc, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + } + return nil +} + +func (b *Backend) BackupRuntime(ctx context.Context, dataRoot string, artifact string) error { + if artifact == "" { + return fmt.Errorf("backup artifact is required") + } + if b.config.PullImage == "" { + return b.config.ValidateForMaterialization() + } + namespace, pvc, err := parseRef(dataRoot) + if err != nil { + return err + } + job := backupJobSpec(namespace, jobName("backup", dataRoot, shortHash(artifact)), pvc, b.config.PullImage, artifact, b.config.RegistrySecret, b.config.RegistryPlainHTTP) + return b.runHelperJob(ctx, job) +} + +func (b *Backend) RestoreRuntime(ctx context.Context, dataRoot string, artifact string) error { + if artifact == "" { + return fmt.Errorf("restore artifact is required") + } + if err := b.config.ValidateForMaterialization(); err != nil { + return err + } + namespace, pvc, err := parseRef(dataRoot) + if err != nil { + return err + } + stagePVC := stagingPVCName("restore:" + dataRoot + ":" + artifact) + if err := b.ensurePVC(ctx, stagePVC); err != nil { + return err + } + defer b.client.CoreV1().PersistentVolumeClaims(namespace).Delete(context.Background(), stagePVC, metav1.DeleteOptions{}) + pullJob := pullJobSpec(namespace, jobName("restore-pull", ref(namespace, stagePVC), shortHash(artifact)), stagePVC, b.config.PullImage, artifact, b.config.RegistrySecret, b.config.RegistryPlainHTTP) + if err := b.runHelperJob(ctx, pullJob); err != nil { + return err + } + if err := b.StopRuntime(dataRoot); err != nil { + return err + } + restoreJob := replacePVCJobSpec(namespace, jobName("restore-copy", dataRoot, shortHash(artifact)), stagePVC, pvc, b.config.HelperImage) + return b.runHelperJob(ctx, restoreJob) +} + func (b *Backend) Attach(commandName string, data string) error { return 
fmt.Errorf("kubernetes attach is not implemented for console %s: pod attach/exec support is required", commandName) } @@ -443,6 +603,99 @@ func (b *Backend) deleteRuntimePods(ctx context.Context, dataRoot string, target return nil } +func (b *Backend) deleteRuntimeJobs(ctx context.Context, dataRoot string, options metav1.DeleteOptions) error { + return b.deleteRuntimeObjects(ctx, dataRoot, func(name string) error { + err := b.client.BatchV1().Jobs(b.config.Namespace).Delete(ctx, name, options) + if apierrors.IsNotFound(err) { + return nil + } + return err + }, "jobs") +} + +func (b *Backend) deleteRuntimeStatefulSets(ctx context.Context, dataRoot string, options metav1.DeleteOptions) error { + return b.deleteRuntimeObjects(ctx, dataRoot, func(name string) error { + err := b.client.AppsV1().StatefulSets(b.config.Namespace).Delete(ctx, name, options) + if apierrors.IsNotFound(err) { + return nil + } + return err + }, "statefulsets") +} + +func (b *Backend) deleteRuntimeServices(ctx context.Context, dataRoot string, options metav1.DeleteOptions) error { + return b.deleteRuntimeObjects(ctx, dataRoot, func(name string) error { + err := b.client.CoreV1().Services(b.config.Namespace).Delete(ctx, name, options) + if apierrors.IsNotFound(err) { + return nil + } + return err + }, "services") +} + +func (b *Backend) deleteRuntimeObjects(ctx context.Context, dataRoot string, deleteOne func(name string) error, kind string) error { + _, pvc, err := parseRef(dataRoot) + if err != nil { + return err + } + selector := labels.SelectorFromSet(labels.Set{ + labelScrollID: dnsLabel(pvc), + }).String() + switch kind { + case "jobs": + items, err := b.client.BatchV1().Jobs(b.config.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) + if err != nil { + return err + } + for _, item := range items.Items { + if err := deleteOne(item.Name); err != nil { + return err + } + } + case "statefulsets": + items, err := b.client.AppsV1().StatefulSets(b.config.Namespace).List(ctx, 
metav1.ListOptions{LabelSelector: selector}) + if err != nil { + return err + } + for _, item := range items.Items { + if err := deleteOne(item.Name); err != nil { + return err + } + } + case "services": + items, err := b.client.CoreV1().Services(b.config.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) + if err != nil { + return err + } + for _, item := range items.Items { + if err := deleteOne(item.Name); err != nil { + return err + } + } + } + return nil +} + +func (b *Backend) deleteRuntimePodsByScroll(ctx context.Context, dataRoot string, options metav1.DeleteOptions) error { + _, pvc, err := parseRef(dataRoot) + if err != nil { + return err + } + selector := labels.SelectorFromSet(labels.Set{ + labelScrollID: dnsLabel(pvc), + }).String() + pods, err := b.client.CoreV1().Pods(b.config.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) + if err != nil { + return err + } + for _, pod := range pods.Items { + if err := b.client.CoreV1().Pods(b.config.Namespace).Delete(ctx, pod.Name, options); err != nil && !apierrors.IsNotFound(err) { + return err + } + } + return nil +} + func (b *Backend) ensurePVC(ctx context.Context, name string) error { pvc := pvcSpec(b.config.Namespace, name, b.config.StorageClass) _, err := b.client.CoreV1().PersistentVolumeClaims(b.config.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) @@ -458,11 +711,11 @@ func (b *Backend) runHelperJob(ctx context.Context, job *batchv1.Job) error { } func (b *Backend) runJobAndLogs(ctx context.Context, job *batchv1.Job) ([]byte, error) { - _ = b.client.BatchV1().Jobs(job.Namespace).Delete(ctx, job.Name, metav1.DeleteOptions{}) - if _, err := b.client.BatchV1().Jobs(job.Namespace).Create(ctx, job, metav1.CreateOptions{}); err != nil { + createdJob, err := b.createFreshJob(ctx, job) + if err != nil { return nil, err } - podName, err := b.waitForJobPod(ctx, job.Name) + podName, err := b.waitForJobPod(ctx, job.Name, string(createdJob.UID)) if err != nil { return nil, err } 
@@ -480,8 +733,36 @@ func (b *Backend) runJobAndLogs(ctx context.Context, job *batchv1.Job) ([]byte, return logs, nil } -func (b *Backend) waitForJobPod(ctx context.Context, jobName string) (string, error) { - selector := labels.SelectorFromSet(labels.Set{"job-name": jobName}).String() +func (b *Backend) createFreshJob(ctx context.Context, job *batchv1.Job) (*batchv1.Job, error) { + propagation := metav1.DeletePropagationBackground + deleteCtx, cancelDelete := context.WithTimeout(ctx, 30*time.Second) + defer cancelDelete() + if err := b.client.BatchV1().Jobs(job.Namespace).Delete(deleteCtx, job.Name, metav1.DeleteOptions{PropagationPolicy: &propagation}); err != nil && !apierrors.IsNotFound(err) { + return nil, err + } + for { + _, err := b.client.BatchV1().Jobs(job.Namespace).Get(deleteCtx, job.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + break + } + if err != nil { + return nil, err + } + select { + case <-deleteCtx.Done(): + return nil, deleteCtx.Err() + case <-time.After(250 * time.Millisecond): + } + } + return b.client.BatchV1().Jobs(job.Namespace).Create(ctx, job, metav1.CreateOptions{}) +} + +func (b *Backend) waitForJobPod(ctx context.Context, jobName string, controllerUID string) (string, error) { + matchLabels := labels.Set{"job-name": jobName} + if controllerUID != "" { + matchLabels["controller-uid"] = controllerUID + } + selector := labels.SelectorFromSet(matchLabels).String() return b.waitForPodBySelector(ctx, selector) } @@ -580,11 +861,31 @@ func (b *Backend) podLogs(ctx context.Context, podName string) ([]byte, error) { func (b *Backend) streamPodLogs(ctx context.Context, podName string, output chan<- string) { defer close(output) - req := b.client.CoreV1().Pods(b.config.Namespace).GetLogs(podName, &corev1.PodLogOptions{Follow: true}) - stream, err := req.Stream(ctx) - if err != nil { - output <- fmt.Sprintf("failed to stream pod logs: %v", err) - return + var stream io.ReadCloser + deadline := time.Now().Add(30 * time.Second) + 
for { + req := b.client.CoreV1().Pods(b.config.Namespace).GetLogs(podName, &corev1.PodLogOptions{Follow: true}) + var err error + stream, err = req.Stream(ctx) + if err == nil { + break + } + if !strings.Contains(err.Error(), "ContainerCreating") && + !strings.Contains(err.Error(), "PodInitializing") && + !strings.Contains(err.Error(), "not available") { + output <- fmt.Sprintf("failed to stream pod logs: %v", err) + return + } + if time.Now().After(deadline) { + output <- fmt.Sprintf("failed to stream pod logs: %v", err) + return + } + select { + case <-ctx.Done(): + output <- fmt.Sprintf("failed to stream pod logs: %v", ctx.Err()) + return + case <-time.After(500 * time.Millisecond): + } } defer stream.Close() scanner := bufio.NewScanner(stream) @@ -667,3 +968,10 @@ func commandProcedureName(commandName string, idx int, procedure *domain.Procedu } return procedureName } + +func runtimeConsoleID(scrollID string, procedureName string) string { + if scrollID == "" { + return procedureName + } + return scrollID + "/" + procedureName +} diff --git a/internal/runtime/kubernetes/resources.go b/internal/runtime/kubernetes/resources.go index b93e0cf7..4a430d2d 100644 --- a/internal/runtime/kubernetes/resources.go +++ b/internal/runtime/kubernetes/resources.go @@ -1,6 +1,7 @@ package kubernetes import ( + "path" "path/filepath" "sort" @@ -46,12 +47,46 @@ func pullJobSpec(namespace string, jobName string, pvc string, image string, art return job } +func backupJobSpec(namespace string, jobName string, pvc string, image string, artifact string, registrySecret string, registryPlainHTTP bool) *batchv1.Job { + command := []string{"druid-client", "push", artifact, "/scroll"} + job := helperJobSpec(namespace, jobName, pvc, image, command, registrySecret, map[string]string{ + labelComponent: "backup", + }) + if registryPlainHTTP { + job.Spec.Template.Spec.Containers[0].Env = append(job.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{Name: "DRUID_REGISTRY_PLAIN_HTTP", Value: 
"true"}) + } + return job +} + func readScrollJobSpec(namespace string, jobName string, pvc string, helperImage string) *batchv1.Job { return helperJobSpec(namespace, jobName, pvc, helperImage, []string{"cat", "/scroll/scroll.yaml"}, "", map[string]string{ labelComponent: "read-scroll", }) } +func readDataFileJobSpec(namespace string, jobName string, pvc string, helperImage string, relativePath string) *batchv1.Job { + return helperJobSpec(namespace, jobName, pvc, helperImage, []string{"cat", path.Join("/scroll", relativePath)}, "", map[string]string{ + labelComponent: "read-data-file", + }) +} + +func writeDataFileJobSpec(namespace string, jobName string, pvc string, helperImage string, relativePath string, encodedData string) *batchv1.Job { + job := helperJobSpec(namespace, jobName, pvc, helperImage, []string{ + "sh", + "-c", + `mkdir -p "$(dirname "$1")" && printf '%s' "$DRUID_DATA_FILE_B64" | base64 -d > "$1"`, + "sh", + path.Join("/scroll", relativePath), + }, "", map[string]string{ + labelComponent: "write-data-file", + }) + job.Spec.Template.Spec.Containers[0].Env = append(job.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{ + Name: "DRUID_DATA_FILE_B64", + Value: encodedData, + }) + return job +} + func copyPVCJobSpec(namespace string, jobName string, sourcePVC string, targetPVC string, helperImage string) *batchv1.Job { labels := map[string]string{ labelManagedBy: "druid", @@ -85,6 +120,14 @@ func copyPVCJobSpec(namespace string, jobName string, sourcePVC string, targetPV } } +func replacePVCJobSpec(namespace string, jobName string, sourcePVC string, targetPVC string, helperImage string) *batchv1.Job { + job := copyPVCJobSpec(namespace, jobName, sourcePVC, targetPVC, helperImage) + job.Spec.Template.Spec.Containers[0].Command = []string{"sh", "-c", "find /final -mindepth 1 -maxdepth 1 -exec rm -rf {} + && cp -a /stage/. 
/final/"} + job.Labels[labelComponent] = "restore-scroll" + job.Spec.Template.Labels[labelComponent] = "restore-scroll" + return job +} + func helperJobSpec(namespace string, jobName string, pvc string, image string, command []string, registrySecret string, labels map[string]string) *batchv1.Job { allLabels := map[string]string{ labelManagedBy: "druid", @@ -118,7 +161,7 @@ func helperJobSpec(namespace string, jobName string, pvc string, image string, c } } -func procedureJobSpec(namespace string, dataRoot string, procedureName string, procedure *domain.Procedure, registrySecret string) (*batchv1.Job, error) { +func procedureJobSpec(namespace string, dataRoot string, procedureName string, procedure *domain.Procedure, env map[string]string, registrySecret string) (*batchv1.Job, error) { _, pvc, err := parseRef(dataRoot) if err != nil { return nil, err @@ -135,7 +178,7 @@ func procedureJobSpec(namespace string, dataRoot string, procedureName string, p TTY: procedure.TTY, Stdin: procedure.TTY, ImagePullPolicy: corev1.PullIfNotPresent, - Env: envVars(procedure.Env), + Env: envVars(env), VolumeMounts: volumeMounts(procedure.Mounts), } podSpec := corev1.PodSpec{ @@ -162,7 +205,7 @@ func procedureJobSpec(namespace string, dataRoot string, procedureName string, p }, nil } -func procedureStatefulSetSpec(namespace string, dataRoot string, procedureName string, procedure *domain.Procedure, registrySecret string) (*appsv1.StatefulSet, error) { +func procedureStatefulSetSpec(namespace string, dataRoot string, procedureName string, procedure *domain.Procedure, env map[string]string, registrySecret string) (*appsv1.StatefulSet, error) { _, pvc, err := parseRef(dataRoot) if err != nil { return nil, err @@ -179,7 +222,7 @@ func procedureStatefulSetSpec(namespace string, dataRoot string, procedureName s TTY: procedure.TTY, Stdin: procedure.TTY, ImagePullPolicy: corev1.PullIfNotPresent, - Env: envVars(procedure.Env), + Env: envVars(env), VolumeMounts: volumeMounts(procedure.Mounts), } 
podSpec := corev1.PodSpec{ diff --git a/internal/runtime/kubernetes/resources_test.go b/internal/runtime/kubernetes/resources_test.go index 81f3727a..785849af 100644 --- a/internal/runtime/kubernetes/resources_test.go +++ b/internal/runtime/kubernetes/resources_test.go @@ -7,6 +7,7 @@ import ( "testing" appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -38,7 +39,7 @@ func TestProcedureJobSpecBuildsDeterministicMountsAndLabels(t *testing.T) { Mounts: []domain.Mount{{Path: "/work", SubPath: "cache"}}, } - job, err := procedureJobSpec("druid", ref("druid", "druid-static-web-data"), "start", procedure, "registry-secret") + job, err := procedureJobSpec("druid", ref("druid", "druid-static-web-data"), "start", procedure, procedure.Env, "registry-secret") if err != nil { t.Fatal(err) } @@ -65,6 +66,72 @@ func TestProcedureJobSpecBuildsDeterministicMountsAndLabels(t *testing.T) { } } +func TestProcedureJobSpecUsesProvidedRuntimeEnv(t *testing.T) { + procedure := &domain.Procedure{ + Image: "alpine:3.20", + Env: map[string]string{ + "PROCEDURE_ONLY": "ignored", + }, + } + job, err := procedureJobSpec("druid", ref("druid", "druid-static-web-data"), "start", procedure, map[string]string{ + "DRUID_PORT_HTTP": "8080", + }, "registry-secret") + if err != nil { + t.Fatal(err) + } + env := job.Spec.Template.Spec.Containers[0].Env + if len(env) != 1 || env[0].Name != "DRUID_PORT_HTTP" || env[0].Value != "8080" { + t.Fatalf("env = %#v", env) + } +} + +func TestProcedureStatefulSetSpecUsesProvidedRuntimeEnv(t *testing.T) { + procedure := &domain.Procedure{ + Image: "nginx:1.27", + Env: map[string]string{ + "PROCEDURE_ONLY": "ignored", + }, + } + statefulSet, err := procedureStatefulSetSpec("druid", ref("druid", "druid-static-web-data"), "start", procedure, map[string]string{ + "DRUID_PORT_HTTP": "8080", + }, "registry-secret") + if err != nil { + t.Fatal(err) + 
} + env := statefulSet.Spec.Template.Spec.Containers[0].Env + if len(env) != 1 || env[0].Name != "DRUID_PORT_HTTP" || env[0].Value != "8080" { + t.Fatalf("env = %#v", env) + } +} + +func TestReadDataFileJobSpecScopesPathToScrollPVC(t *testing.T) { + job := readDataFileJobSpec("druid", "read-file", "druid-scroll-data", "alpine:3.20", "data/private/dist/app.wasm") + container := job.Spec.Template.Spec.Containers[0] + if job.Namespace != "druid" || job.Labels[labelComponent] != "read-data-file" { + t.Fatalf("unexpected job metadata: namespace=%s labels=%#v", job.Namespace, job.Labels) + } + if got := container.Command; len(got) != 2 || got[0] != "cat" || got[1] != "/scroll/data/private/dist/app.wasm" { + t.Fatalf("unexpected command: %#v", got) + } + if got := job.Spec.Template.Spec.Volumes[0].PersistentVolumeClaim.ClaimName; got != "druid-scroll-data" { + t.Fatalf("claim = %s, want druid-scroll-data", got) + } +} + +func TestWriteDataFileJobSpecScopesPathToScrollPVC(t *testing.T) { + job := writeDataFileJobSpec("druid", "write-file", "druid-scroll-data", "alpine:3.20", "data/private/config.json", "e30=") + container := job.Spec.Template.Spec.Containers[0] + if job.Namespace != "druid" || job.Labels[labelComponent] != "write-data-file" { + t.Fatalf("unexpected job metadata: namespace=%s labels=%#v", job.Namespace, job.Labels) + } + if got := container.Command; len(got) != 5 || got[4] != "/scroll/data/private/config.json" { + t.Fatalf("unexpected command: %#v", got) + } + if len(container.Env) != 1 || container.Env[0].Name != "DRUID_DATA_FILE_B64" || container.Env[0].Value != "e30=" { + t.Fatalf("unexpected env: %#v", container.Env) + } +} + func TestProcedureStatefulSetSpecBuildsPersistentWorkload(t *testing.T) { procedure := &domain.Procedure{ Image: "nginx:1.27", @@ -73,7 +140,7 @@ func TestProcedureStatefulSetSpecBuildsPersistentWorkload(t *testing.T) { Mounts: []domain.Mount{{Path: "/usr/share/nginx/html", SubPath: "site", ReadOnly: true}}, } - statefulSet, err := 
procedureStatefulSetSpec("druid", ref("druid", "druid-static-web-data"), "start", procedure, "registry-secret") + statefulSet, err := procedureStatefulSetSpec("druid", ref("druid", "druid-static-web-data"), "start", procedure, procedure.Env, "registry-secret") if err != nil { t.Fatal(err) } @@ -194,6 +261,146 @@ func TestExpectedPortsDegradesWhenHubbleUnavailable(t *testing.T) { } } +func TestRoutingTargetsReturnStableBackendServices(t *testing.T) { + backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), fake.NewSimpleClientset(), fakeHubble{}) + dataRoot := ref("druid", "druid-static-web-data") + procedureID := "web" + + targets, err := backend.RoutingTargets(dataRoot, map[string]*domain.CommandInstructionSet{ + "serve": {Procedures: []*domain.Procedure{{ + Id: &procedureID, + ExpectedPorts: []domain.ExpectedPort{{Name: "http"}}, + }}}, + }, []domain.Port{{Name: "http", Port: 8080, Protocol: "http"}}) + if err != nil { + t.Fatal(err) + } + + if len(targets) != 1 { + t.Fatalf("targets = %#v", targets) + } + target := targets[0] + if target.Namespace != "druid" || target.ServiceName != serviceName(dataRoot, "web", "http") || target.ServicePort != 8080 { + t.Fatalf("target = %#v", target) + } + if target.Protocol != "http" || target.PortName != "http" || target.Procedure != "web" { + t.Fatalf("target = %#v", target) + } + if target.Selector[labelScrollID] != "druid-static-web-data" || target.Selector[labelProcedure] != "web" { + t.Fatalf("selector = %#v", target.Selector) + } +} + +func TestStopRuntimeDeletesWorkloadsButPreservesDataAndServices(t *testing.T) { + client := fake.NewSimpleClientset() + backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), client, fakeHubble{}) + dataRoot := ref("druid", "druid-static-web-data") + labels := baseLabels("druid-static-web-data") + labels[labelProcedure] = "web" + jobName := jobName("proc", dataRoot, 
"web") + statefulSetName := statefulSetName(dataRoot, "web") + service, err := serviceSpec("druid", dataRoot, "web", "http", domain.Port{Name: "http", Port: 8080, Protocol: "tcp"}) + if err != nil { + t.Fatal(err) + } + for _, create := range []func() error{ + func() error { + _, err := client.CoreV1().PersistentVolumeClaims("druid").Create(context.Background(), pvcSpec("druid", "druid-static-web-data", ""), metav1.CreateOptions{}) + return err + }, + func() error { + _, err := client.CoreV1().Services("druid").Create(context.Background(), service, metav1.CreateOptions{}) + return err + }, + func() error { + _, err := client.BatchV1().Jobs("druid").Create(context.Background(), &batchv1.Job{ObjectMeta: metav1.ObjectMeta{Name: jobName, Namespace: "druid", Labels: labels}}, metav1.CreateOptions{}) + return err + }, + func() error { + _, err := client.AppsV1().StatefulSets("druid").Create(context.Background(), &appsv1.StatefulSet{ObjectMeta: metav1.ObjectMeta{Name: statefulSetName, Namespace: "druid", Labels: labels}}, metav1.CreateOptions{}) + return err + }, + func() error { + _, err := client.CoreV1().Pods("druid").Create(context.Background(), &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "web-0", Namespace: "druid", Labels: labels}}, metav1.CreateOptions{}) + return err + }, + } { + if err := create(); err != nil { + t.Fatal(err) + } + } + + if err := backend.StopRuntime(dataRoot); err != nil { + t.Fatal(err) + } + if _, err := client.BatchV1().Jobs("druid").Get(context.Background(), jobName, metav1.GetOptions{}); !apierrors.IsNotFound(err) { + t.Fatalf("Job get error = %v, want not found", err) + } + if _, err := client.AppsV1().StatefulSets("druid").Get(context.Background(), statefulSetName, metav1.GetOptions{}); !apierrors.IsNotFound(err) { + t.Fatalf("StatefulSet get error = %v, want not found", err) + } + if _, err := client.CoreV1().Pods("druid").Get(context.Background(), "web-0", metav1.GetOptions{}); !apierrors.IsNotFound(err) { + t.Fatalf("Pod get error 
= %v, want not found", err) + } + if _, err := client.CoreV1().Services("druid").Get(context.Background(), service.Name, metav1.GetOptions{}); err != nil { + t.Fatalf("Service get error = %v, want preserved", err) + } + if _, err := client.CoreV1().PersistentVolumeClaims("druid").Get(context.Background(), "druid-static-web-data", metav1.GetOptions{}); err != nil { + t.Fatalf("PVC get error = %v, want preserved", err) + } +} + +func TestDeleteRuntimePurgesServicesAndDataWhenRequested(t *testing.T) { + client := fake.NewSimpleClientset() + backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), client, fakeHubble{}) + dataRoot := ref("druid", "druid-static-web-data") + service, err := serviceSpec("druid", dataRoot, "web", "http", domain.Port{Name: "http", Port: 8080, Protocol: "tcp"}) + if err != nil { + t.Fatal(err) + } + if _, err := client.CoreV1().PersistentVolumeClaims("druid").Create(context.Background(), pvcSpec("druid", "druid-static-web-data", ""), metav1.CreateOptions{}); err != nil { + t.Fatal(err) + } + if _, err := client.CoreV1().Services("druid").Create(context.Background(), service, metav1.CreateOptions{}); err != nil { + t.Fatal(err) + } + + if err := backend.DeleteRuntime(dataRoot, true); err != nil { + t.Fatal(err) + } + + if _, err := client.CoreV1().Services("druid").Get(context.Background(), service.Name, metav1.GetOptions{}); !apierrors.IsNotFound(err) { + t.Fatalf("Service get error = %v, want not found", err) + } + if _, err := client.CoreV1().PersistentVolumeClaims("druid").Get(context.Background(), "druid-static-web-data", metav1.GetOptions{}); !apierrors.IsNotFound(err) { + t.Fatalf("PVC get error = %v, want not found", err) + } +} + +func TestBackupAndRestoreJobSpecsUseRuntimePVCAndRegistryEnv(t *testing.T) { + backup := backupJobSpec("druid", "backup", "runtime-pvc", "druid-client:test", "registry.local/scroll:backup", "registry-secret", true) + if 
backup.Spec.Template.Spec.Containers[0].Command[1] != "push" { + t.Fatalf("backup command = %#v", backup.Spec.Template.Spec.Containers[0].Command) + } + if got := backup.Spec.Template.Spec.Volumes[0].PersistentVolumeClaim.ClaimName; got != "runtime-pvc" { + t.Fatalf("backup PVC = %s, want runtime-pvc", got) + } + if len(backup.Spec.Template.Spec.ImagePullSecrets) != 1 || backup.Spec.Template.Spec.ImagePullSecrets[0].Name != "registry-secret" { + t.Fatalf("image pull secrets = %#v", backup.Spec.Template.Spec.ImagePullSecrets) + } + if env := backup.Spec.Template.Spec.Containers[0].Env; len(env) != 1 || env[0].Name != "DRUID_REGISTRY_PLAIN_HTTP" || env[0].Value != "true" { + t.Fatalf("env = %#v", env) + } + + restore := replacePVCJobSpec("druid", "restore", "stage-pvc", "runtime-pvc", "alpine:3.20") + if got := restore.Labels[labelComponent]; got != "restore-scroll" { + t.Fatalf("restore component = %s", got) + } + if command := strings.Join(restore.Spec.Template.Spec.Containers[0].Command, " "); !strings.Contains(command, "rm -rf") || !strings.Contains(command, "cp -a") { + t.Fatalf("restore command = %#v", restore.Spec.Template.Spec.Containers[0].Command) + } +} + func TestMaterializationRequiresPullImage(t *testing.T) { backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), fake.NewSimpleClientset(), fakeHubble{}) _, err := backend.MaterializeScroll(context.Background(), "ghcr.io/example/scroll:latest", "") diff --git a/internal/runtime/kubernetes/state_store.go b/internal/runtime/kubernetes/state_store.go index 2d2656ce..0fabc1c5 100644 --- a/internal/runtime/kubernetes/state_store.go +++ b/internal/runtime/kubernetes/state_store.go @@ -28,9 +28,11 @@ const ( configMapKeyScrollName = "scroll_name" configMapKeyScrollYAML = "scroll_yaml" configMapKeyStatus = "status" + configMapKeyLastError = "last_error" configMapKeyCreatedAt = "created_at" configMapKeyUpdatedAt = "updated_at" configMapKeyCommandsJSON = 
"commands_json" + configMapKeyRoutingJSON = "routing_json" ) type ConfigMapStateStore struct { @@ -159,6 +161,10 @@ func runtimeScrollConfigMap(namespace string, scroll *domain.RuntimeScroll) (*co if err != nil { return nil, err } + routing, err := json.Marshal(scroll.Routing) + if err != nil { + return nil, err + } return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: scrollConfigMapName(scroll.ID), @@ -179,9 +185,11 @@ func runtimeScrollConfigMap(namespace string, scroll *domain.RuntimeScroll) (*co configMapKeyScrollName: scroll.ScrollName, configMapKeyScrollYAML: scroll.ScrollYAML, configMapKeyStatus: string(scroll.Status), + configMapKeyLastError: scroll.LastError, configMapKeyCreatedAt: formatRuntimeTime(scroll.CreatedAt), configMapKeyUpdatedAt: formatRuntimeTime(scroll.UpdatedAt), configMapKeyCommandsJSON: string(commands), + configMapKeyRoutingJSON: string(routing), }, }, nil } @@ -196,6 +204,14 @@ func runtimeScrollFromConfigMap(configMap *corev1.ConfigMap) (*domain.RuntimeScr if err := json.Unmarshal([]byte(commandsJSON), &commands); err != nil { return nil, err } + routingJSON := data[configMapKeyRoutingJSON] + if routingJSON == "" { + routingJSON = "[]" + } + routing := []domain.RuntimeRouteAssignment{} + if err := json.Unmarshal([]byte(routingJSON), &routing); err != nil { + return nil, err + } id := data[configMapKeyID] if id == "" { id = configMap.Labels[labelScrollID] @@ -209,6 +225,8 @@ func runtimeScrollFromConfigMap(configMap *corev1.ConfigMap) (*domain.RuntimeScr ScrollName: data[configMapKeyScrollName], ScrollYAML: data[configMapKeyScrollYAML], Status: domain.RuntimeScrollStatus(data[configMapKeyStatus]), + LastError: data[configMapKeyLastError], + Routing: routing, CreatedAt: parseRuntimeTime(data[configMapKeyCreatedAt]), UpdatedAt: parseRuntimeTime(data[configMapKeyUpdatedAt]), Commands: commands, diff --git a/internal/runtime/runtime_test.go b/internal/runtime/runtime_test.go index 5dcdec17..502c3337 100644 --- 
a/internal/runtime/runtime_test.go +++ b/internal/runtime/runtime_test.go @@ -1,6 +1,7 @@ package runtime_test import ( + "context" "os" "path/filepath" "reflect" @@ -56,6 +57,24 @@ func TestDockerRunCommandBuildsCanonicalMounts(t *testing.T) { } } +func TestDockerBuildContainerSpecUsesProvidedRuntimeEnv(t *testing.T) { + dataRoot := t.TempDir() + spec, err := docker.BuildContainerSpecWithEnv("start", &domain.Procedure{ + Image: "alpine:3.20", + Env: map[string]string{ + "PROCEDURE_ONLY": "ignored", + }, + }, dataRoot, nil, map[string]string{ + "DRUID_PORT_HTTP": "8080", + }) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(spec.Env, []string{"DRUID_PORT_HTTP=8080"}) { + t.Fatalf("env = %#v", spec.Env) + } +} + func TestDockerRunCommandDefaultsMountSubPathToDataRoot(t *testing.T) { dataRoot := t.TempDir() spec, err := docker.BuildContainerSpec("start", &domain.Procedure{ @@ -107,3 +126,44 @@ func TestDockerReadScrollFile(t *testing.T) { t.Fatalf("scroll yaml = %q, want %q", got, want) } } + +func TestDockerReadDataFileScopesToDataRoot(t *testing.T) { + dataRoot := t.TempDir() + want := []byte("bundle") + path := filepath.Join(dataRoot, "data", "private", "dist") + if err := os.MkdirAll(path, 0755); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(path, "app.wasm"), want, 0644); err != nil { + t.Fatal(err) + } + backend := &docker.Backend{} + got, err := backend.ReadDataFile(context.Background(), dataRoot, "/data/private/dist/app.wasm") + if err != nil { + t.Fatal(err) + } + if string(got) != string(want) { + t.Fatalf("data file = %q, want %q", got, want) + } + if _, err := backend.ReadDataFile(context.Background(), dataRoot, "../escape"); err == nil { + t.Fatal("expected traversal path to be rejected") + } +} + +func TestDockerWriteDataFileScopesToDataRoot(t *testing.T) { + dataRoot := t.TempDir() + backend := &docker.Backend{} + if err := backend.WriteDataFile(context.Background(), dataRoot, "data/private/config.json", 
[]byte("{}")); err != nil { + t.Fatal(err) + } + got, err := os.ReadFile(filepath.Join(dataRoot, "data", "private", "config.json")) + if err != nil { + t.Fatal(err) + } + if string(got) != "{}" { + t.Fatalf("written data = %q, want {}", got) + } + if err := backend.WriteDataFile(context.Background(), dataRoot, "../escape", []byte("bad")); err == nil { + t.Fatal("expected traversal path to be rejected") + } +} diff --git a/test/integration/docker/docker_cli_test.go b/test/integration/docker/docker_cli_test.go new file mode 100644 index 00000000..2db4fc1c --- /dev/null +++ b/test/integration/docker/docker_cli_test.go @@ -0,0 +1,121 @@ +//go:build integration && docker + +package docker_test + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/highcard-dev/daemon/test/integration/internal/e2e" +) + +func TestDockerBackendCLIComplexLifecycle(t *testing.T) { + e2e.RequireDocker(t) + bins := e2e.BuildBinaries(t) + port := e2e.FreePort(t) + routePort := e2e.FreePort(t) + name := fmt.Sprintf("docker-cli-%d", time.Now().UnixNano()) + fixture := e2e.WriteFixture(t, filepath.Join(t.TempDir(), "scroll"), name, port, routePort) + + socket := filepath.Join(os.TempDir(), fmt.Sprintf("druid-docker-%d.sock", time.Now().UnixNano())) + t.Cleanup(func() { _ = os.Remove(socket) }) + stateDir := filepath.Join(t.TempDir(), "state") + logs := e2e.StartDaemon(t, bins, "docker", socket, stateDir, nil, nil) + t.Cleanup(func() { + if t.Failed() { + t.Logf("druid daemon logs:\n%s", logs.String()) + } + }) + + created := e2e.RunClientJSON[e2e.RuntimeScroll](t, bins, socket, "create", "--no-start", "--state-dir", stateDir, fixture.Dir, fixture.Name) + if created.Status != "created" { + t.Fatalf("created status = %s, want created", created.Status) + } + e2e.RunClient(t, bins, socket, "routing", "apply", created.ID, "--file", fixture.RoutingFile) + + started := e2e.RunClientJSON[e2e.RuntimeScroll](t, bins, socket, "start", created.ID) + if started.Status != 
"running" { + t.Fatalf("started status = %s, want running", started.Status) + } + body := e2e.WaitHTTP(t, fmt.Sprintf("http://127.0.0.1:%d/env.txt", fixture.Port)) + env := e2e.ParseEnv(body) + e2e.AssertRuntimeEnv(t, env, fixture, "docker", created.ID) + if env["USER_ENV"] != "fixture" { + t.Fatalf("USER_ENV = %q, want fixture", env["USER_ENV"]) + } + + statuses := e2e.RunClientJSON[[]e2e.RuntimePortStatus](t, bins, socket, "ports", created.ID) + assertPortBound(t, statuses, fixture) + + e2e.RunClient(t, bins, socket, "run", created.ID, "record") + dataRoot := filepath.Join(stateDir, "scrolls", created.ID, "data") + assertFileContains(t, filepath.Join(dataRoot, "finite.txt"), "finite-ok") + recordEnv := e2e.ParseEnv(readFile(t, filepath.Join(dataRoot, "record-env.txt"))) + e2e.AssertRuntimeEnv(t, recordEnv, fixture, "docker", created.ID) + if recordEnv["USER_ENV"] != "finite" { + t.Fatalf("record USER_ENV = %q, want finite", recordEnv["USER_ENV"]) + } + + e2e.RunClient(t, bins, socket, "stop", created.ID) + waitDockerContainersGone(t, fixture.ServeProc, fixture.RecordProc) + deleted := e2e.RunClient(t, bins, socket, "delete", created.ID) + if !strings.Contains(deleted, `"status": "deleted"`) { + t.Fatalf("delete response = %s, want deleted status", deleted) + } +} + +func assertPortBound(t *testing.T, statuses []e2e.RuntimePortStatus, fixture e2e.Fixture) { + t.Helper() + for _, status := range statuses { + if status.Name == "http" && status.Procedure == fixture.ServeProc { + if !status.Bound { + t.Fatalf("port status = %#v, want bound", status) + } + if status.HostPort != fixture.Port { + t.Fatalf("host port = %d, want %d in status %#v", status.HostPort, fixture.Port, status) + } + return + } + } + t.Fatalf("http port for %s not found in %#v", fixture.ServeProc, statuses) +} + +func assertFileContains(t *testing.T, path string, want string) { + t.Helper() + got := readFile(t, path) + if !strings.Contains(got, want) { + t.Fatalf("%s = %q, want to contain %q", 
path, got, want) + } +} + +func readFile(t *testing.T, path string) string { + t.Helper() + data, err := os.ReadFile(path) + if err != nil { + t.Fatal(err) + } + return string(data) +} + +func waitDockerContainersGone(t *testing.T, labels ...string) { + t.Helper() + deadline := time.Now().Add(30 * time.Second) + for time.Now().Before(deadline) { + found := false + for _, label := range labels { + out := e2e.Run(t, "docker", "ps", "-a", "--filter", "label=druid.command="+label, "--format", "{{.Names}}") + if strings.TrimSpace(out) != "" { + found = true + } + } + if !found { + return + } + time.Sleep(500 * time.Millisecond) + } + t.Fatalf("docker containers still exist for labels %v", labels) +} diff --git a/test/integration/example_test.go b/test/integration/example_test.go index 883181b3..65ad597c 100644 --- a/test/integration/example_test.go +++ b/test/integration/example_test.go @@ -1,4 +1,4 @@ -//go:build integration +//go:build integration && legacy_examples package integration_test @@ -105,6 +105,7 @@ func TestExamples(t *testing.T) { } runtimeBackend := mock_ports.NewMockRuntimeBackendInterface(ctrl) exitCode := 0 + runtimeBackend.EXPECT().Name().Return("docker").AnyTimes() runtimeBackend.EXPECT().RunCommand(gomock.Any()).Return(&exitCode, nil).AnyTimes() procedureLauncher, err := services.NewProcedureLauncher(scrollService, runtimeBackend, "/tmp") if err != nil { diff --git a/test/integration/internal/e2e/harness.go b/test/integration/internal/e2e/harness.go new file mode 100644 index 00000000..5d933a20 --- /dev/null +++ b/test/integration/internal/e2e/harness.go @@ -0,0 +1,396 @@ +package e2e + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +type Binaries struct { + Druid string + Client string + Home string +} + +type Fixture struct { + Dir string + Name string + ServeProc string + RecordProc string + Port int + RouteHost 
string + RouteURL string + RoutePort int + RoutingFile string + Assignment string + ScrollRootRef string +} + +type RuntimeScroll struct { + ID string `json:"id"` + DataRoot string `json:"data_root"` + ScrollName string `json:"scroll_name"` + Status string `json:"status"` +} + +type RuntimePortStatus struct { + Name string `json:"name"` + Procedure string `json:"procedure"` + Port int `json:"port"` + Bound bool `json:"bound"` + HostPort int `json:"host_port"` + Source string `json:"source"` +} + +type RuntimeRoutingTarget struct { + Name string `json:"name"` + Procedure string `json:"procedure"` + PortName string `json:"port_name"` + Port int `json:"port"` + Namespace string `json:"namespace"` + ServiceName string `json:"service_name"` + ServicePort int `json:"service_port"` +} + +type LockedBuffer struct { + mu sync.Mutex + buf bytes.Buffer +} + +func (b *LockedBuffer) Write(p []byte) (int, error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.buf.Write(p) +} + +func (b *LockedBuffer) String() string { + b.mu.Lock() + defer b.mu.Unlock() + return b.buf.String() +} + +func RepoRoot(t *testing.T) string { + t.Helper() + _, file, _, ok := runtime.Caller(0) + if !ok { + t.Fatal("could not resolve repo root") + } + return filepath.Clean(filepath.Join(filepath.Dir(file), "..", "..", "..", "..")) +} + +func BuildBinaries(t *testing.T) Binaries { + t.Helper() + binDir := filepath.Join(t.TempDir(), "bin") + if err := os.MkdirAll(binDir, 0755); err != nil { + t.Fatal(err) + } + home := filepath.Join(t.TempDir(), "home") + if err := os.MkdirAll(home, 0755); err != nil { + t.Fatal(err) + } + bins := Binaries{ + Druid: filepath.Join(binDir, "druid"), + Client: filepath.Join(binDir, "druid-client"), + Home: home, + } + build(t, "./apps/druid", bins.Druid) + build(t, "./apps/druid-client", bins.Client) + return bins +} + +func build(t *testing.T, pkg string, output string) { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) + defer cancel() 
+ cmd := exec.CommandContext(ctx, "go", "build", "-o", output, pkg) + cmd.Dir = RepoRoot(t) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("go build %s failed: %v\n%s", pkg, err, out) + } +} + +func StartDaemon(t *testing.T, bins Binaries, runtimeName string, socket string, stateDir string, extraArgs []string, extraEnv []string) *LockedBuffer { + t.Helper() + if err := os.MkdirAll(filepath.Dir(socket), 0755); err != nil { + t.Fatal(err) + } + ctx, cancel := context.WithCancel(context.Background()) + args := []string{"serve", "--runtime", runtimeName, "--socket", socket, "--state-dir", stateDir} + args = append(args, extraArgs...) + cmd := exec.CommandContext(ctx, bins.Druid, args...) + cmd.Dir = RepoRoot(t) + cmd.Env = append(os.Environ(), append([]string{"HOME=" + bins.Home}, extraEnv...)...) + logs := &LockedBuffer{} + cmd.Stdout = logs + cmd.Stderr = logs + if err := cmd.Start(); err != nil { + cancel() + t.Fatalf("start druid daemon: %v\n%s", err, logs.String()) + } + t.Cleanup(func() { + cancel() + _ = cmd.Wait() + }) + deadline := time.Now().Add(20 * time.Second) + for time.Now().Before(deadline) { + conn, err := net.DialTimeout("unix", socket, 200*time.Millisecond) + if err == nil { + _ = conn.Close() + return logs + } + if cmd.ProcessState != nil && cmd.ProcessState.Exited() { + t.Fatalf("druid daemon exited before socket became ready:\n%s", logs.String()) + } + time.Sleep(100 * time.Millisecond) + } + t.Fatalf("druid daemon socket %s did not become ready:\n%s", socket, logs.String()) + return logs +} + +func RunClient(t *testing.T, bins Binaries, socket string, args ...string) string { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + config := filepath.Join(bins.Home, "client.yaml") + envFile := filepath.Join(bins.Home, ".env") + fullArgs := append([]string{"--daemon-socket", socket, "--config", config, "--env-file", envFile}, args...) 
+ cmd := exec.CommandContext(ctx, bins.Client, fullArgs...) + cmd.Dir = RepoRoot(t) + cmd.Env = append(os.Environ(), "HOME="+bins.Home) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("druid-client %s failed: %v\n%s", strings.Join(args, " "), err, out) + } + return string(out) +} + +func RunClientJSON[T any](t *testing.T, bins Binaries, socket string, args ...string) T { + t.Helper() + out := RunClient(t, bins, socket, args...) + var value T + if err := json.Unmarshal([]byte(out), &value); err != nil { + t.Fatalf("decode druid-client %s JSON: %v\n%s", strings.Join(args, " "), err, out) + } + return value +} + +func FreePort(t *testing.T) int { + t.Helper() + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer listener.Close() + return listener.Addr().(*net.TCPAddr).Port +} + +func WriteFixture(t *testing.T, dir string, name string, port int, routePort int) Fixture { + t.Helper() + suffix := strings.ToLower(strings.ReplaceAll(name, "_", "-")) + serveProc := "web-" + suffix + recordProc := "record-" + suffix + routeHost := name + ".runtime.test" + routeURL := fmt.Sprintf("http://%s:%d", routeHost, routePort) + yaml := fmt.Sprintf(`name: %s +desc: CLI integration fixture with persistent data, a finite command, declared ports, and runtime env checks +version: 0.1.0 +app_version: "test" +serve: serve +ports: + - name: http + protocol: http + port: %d + mandatory: true +commands: + serve: + run: persistent + procedures: + - id: %s + image: busybox:1.36 + env: + USER_ENV: fixture + DRUID_PORT_HTTP: user-should-not-win + expectedPorts: + - name: http + keepAliveTraffic: 1b/5m + mounts: + - path: /site + sub_path: public + command: + - sh + - -c + - >- + set -eu; + mkdir -p /site; + env | sort > /site/env.txt; + printf 'healthy\n' > /site/index.txt; + httpd -f -p %d -h /site + record: + run: once + procedures: + - id: %s + image: busybox:1.36 + env: + USER_ENV: finite + mounts: + - path: /data + command: + - sh + - 
-c + - >- + set -eu; + mkdir -p /data; + printf 'finite-ok\n' > /data/finite.txt; + env | sort > /data/record-env.txt +`, name, port, serveProc, port, recordProc) + if err := os.MkdirAll(dir, 0755); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(dir, "scroll.yaml"), []byte(yaml), 0644); err != nil { + t.Fatal(err) + } + routingFile := filepath.Join(dir, "routing.json") + routing := fmt.Sprintf(`{ + "assignments": [ + { + "name": "%s-http", + "port_name": "http", + "host": "%s", + "external_ip": "127.0.0.1", + "public_port": %d, + "url": "%s", + "protocol": "http" + } + ] +}`, serveProc, routeHost, routePort, routeURL) + if err := os.WriteFile(routingFile, []byte(routing), 0644); err != nil { + t.Fatal(err) + } + return Fixture{ + Dir: dir, + Name: name, + ServeProc: serveProc, + RecordProc: recordProc, + Port: port, + RouteHost: routeHost, + RouteURL: routeURL, + RoutePort: routePort, + RoutingFile: routingFile, + Assignment: serveProc + "-http", + } +} + +func WaitHTTP(t *testing.T, url string) string { + t.Helper() + client := &http.Client{Timeout: 2 * time.Second} + deadline := time.Now().Add(90 * time.Second) + var lastErr error + for time.Now().Before(deadline) { + resp, err := client.Get(url) + if err == nil { + body, readErr := io.ReadAll(resp.Body) + _ = resp.Body.Close() + if readErr == nil && resp.StatusCode >= 200 && resp.StatusCode < 300 { + return string(body) + } + lastErr = fmt.Errorf("status %d body %q readErr %v", resp.StatusCode, string(body), readErr) + } else { + lastErr = err + } + time.Sleep(500 * time.Millisecond) + } + t.Fatalf("HTTP %s did not become ready: %v", url, lastErr) + return "" +} + +func ParseEnv(body string) map[string]string { + result := map[string]string{} + for _, line := range strings.Split(body, "\n") { + if line == "" { + continue + } + key, value, ok := strings.Cut(line, "=") + if ok { + result[key] = value + } + } + return result +} + +func AssertRuntimeEnv(t *testing.T, env map[string]string, 
fixture Fixture, runtimeName string, scrollID string) { + t.Helper() + want := map[string]string{ + "DRUID_SCROLL_ID": scrollID, + "DRUID_SCROLL_NAME": fixture.Name, + "DRUID_RUNTIME_BACKEND": runtimeName, + "DRUID_PORT_HTTP": fmt.Sprintf("%d", fixture.Port), + "DRUID_PORT_HTTP_1": fmt.Sprintf("%d", fixture.Port), + "DRUID_PORT_HTTP_PROTOCOL": "http", + "DRUID_PORT_HTTP_IP": "127.0.0.1", + "DRUID_IP": "127.0.0.1", + "DRUID_PORT_HTTP_PUBLIC": fmt.Sprintf("%d", fixture.RoutePort), + "DRUID_PORT_HTTP_HOST": fixture.RouteHost, + "DRUID_PORT_HTTP_URL": fixture.RouteURL, + } + for key, value := range want { + if env[key] != value { + t.Fatalf("%s = %q, want %q in env %#v", key, env[key], value, env) + } + } + if env["DRUID_IP_WAIT"] != "" { + t.Fatalf("DRUID_IP_WAIT = %q, want unset after routing assignment", env["DRUID_IP_WAIT"]) + } + if env["DRUID_PORT_HTTP"] == "user-should-not-win" { + t.Fatalf("runtime DRUID_PORT_HTTP did not override procedure env") + } +} + +func RequireCommand(t *testing.T, name string) { + t.Helper() + if _, err := exec.LookPath(name); err != nil { + if os.Getenv("CI") == "" { + t.Skipf("%s is required for this integration test", name) + } + t.Fatalf("%s is required for this integration test: %v", name, err) + } +} + +func RequireDocker(t *testing.T) { + t.Helper() + RequireCommand(t, "docker") + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + cmd := exec.CommandContext(ctx, "docker", "info") + if out, err := cmd.CombinedOutput(); err != nil { + if os.Getenv("CI") == "" { + t.Skipf("Docker daemon is required for this integration test: %v\n%s", err, out) + } + t.Fatalf("Docker daemon is required for this integration test: %v\n%s", err, out) + } +} + +func Run(t *testing.T, name string, args ...string) string { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + cmd := exec.CommandContext(ctx, name, args...) 
+ cmd.Dir = RepoRoot(t) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("%s %s failed: %v\n%s", name, strings.Join(args, " "), err, out) + } + return string(out) +} diff --git a/test/integration/kubernetes/kubernetes_cli_test.go b/test/integration/kubernetes/kubernetes_cli_test.go new file mode 100644 index 00000000..ba5b54ff --- /dev/null +++ b/test/integration/kubernetes/kubernetes_cli_test.go @@ -0,0 +1,339 @@ +//go:build integration && kubernetes + +package kubernetes_test + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/highcard-dev/daemon/test/integration/internal/e2e" +) + +func TestKubernetesBackendCLIComplexLifecycle(t *testing.T) { + requireKubernetes(t) + bins := e2e.BuildBinaries(t) + port := e2e.FreePort(t) + routePort := e2e.FreePort(t) + suffix := fmt.Sprintf("%x", time.Now().UnixNano())[:10] + namespace := "druid-cli-e2e-" + suffix + pvc := "druid-e2e-" + suffix + ref := fmt.Sprintf("k8s://%s/%s", namespace, pvc) + name := "k8s-cli-" + suffix + fixture := e2e.WriteFixture(t, filepath.Join(t.TempDir(), "scroll"), name, port, routePort) + fixture.ScrollRootRef = ref + + e2e.Run(t, "kubectl", "create", "namespace", namespace) + t.Cleanup(func() { + e2e.Run(t, "kubectl", "delete", "namespace", namespace, "--ignore-not-found=true", "--wait=false") + }) + seedPVC(t, namespace, pvc, fixture.Dir) + kubeconfig := writeCurrentKubeconfig(t) + + socket := filepath.Join(os.TempDir(), fmt.Sprintf("druid-k8s-%d.sock", time.Now().UnixNano())) + t.Cleanup(func() { _ = os.Remove(socket) }) + stateDir := filepath.Join(t.TempDir(), "state") + logs := e2e.StartDaemon(t, bins, "kubernetes", socket, stateDir, []string{ + "--k8s-namespace", namespace, + "--k8s-kubeconfig", kubeconfig, + "--hubble-relay-addr", "127.0.0.1:9", + }, nil) + t.Cleanup(func() { + if t.Failed() { + t.Logf("druid daemon logs:\n%s", logs.String()) + } + }) + + created := e2e.RunClientJSON[e2e.RuntimeScroll](t, bins, 
socket, "create", "--no-start", "--scroll-root", ref, "--data-root", ref, "seeded-artifact", fixture.Name) + if created.Status != "created" { + t.Fatalf("created status = %s, want created", created.Status) + } + targets := e2e.RunClientJSON[[]e2e.RuntimeRoutingTarget](t, bins, socket, "routing", "targets", created.ID) + target := findTarget(t, targets, fixture) + if target.Namespace != namespace || target.ServicePort != fixture.Port { + t.Fatalf("target = %#v, want namespace %s service port %d", target, namespace, fixture.Port) + } + + e2e.RunClient(t, bins, socket, "routing", "apply", created.ID, "--file", fixture.RoutingFile) + started := e2e.RunClientJSON[e2e.RuntimeScroll](t, bins, socket, "start", created.ID) + if started.Status != "running" { + t.Fatalf("started status = %s, want running", started.Status) + } + + localPort := e2e.FreePort(t) + waitServiceExists(t, namespace, target.ServiceName) + waitRuntimePodReady(t, namespace, pvc) + forward := startPortForward(t, namespace, target.ServiceName, localPort, fixture.Port) + t.Cleanup(forward) + body := e2e.WaitHTTP(t, fmt.Sprintf("http://127.0.0.1:%d/env.txt", localPort)) + env := e2e.ParseEnv(body) + e2e.AssertRuntimeEnv(t, env, fixture, "kubernetes", created.ID) + if env["USER_ENV"] != "fixture" { + t.Fatalf("USER_ENV = %q, want fixture", env["USER_ENV"]) + } + + statuses := e2e.RunClientJSON[[]e2e.RuntimePortStatus](t, bins, socket, "ports", created.ID) + assertKubernetesPort(t, statuses, fixture) + + e2e.RunClient(t, bins, socket, "run", created.ID, "record") + if got := readPVCFile(t, namespace, pvc, "data/finite.txt"); !strings.Contains(got, "finite-ok") { + t.Fatalf("finite file = %q, want finite-ok", got) + } + recordEnv := e2e.ParseEnv(readPVCFile(t, namespace, pvc, "data/record-env.txt")) + e2e.AssertRuntimeEnv(t, recordEnv, fixture, "kubernetes", created.ID) + if recordEnv["USER_ENV"] != "finite" { + t.Fatalf("record USER_ENV = %q, want finite", recordEnv["USER_ENV"]) + } + + stopped := 
e2e.RunClientJSON[e2e.RuntimeScroll](t, bins, socket, "stop", created.ID) + if stopped.Status != "stopped" { + t.Fatalf("stopped status = %s, want stopped", stopped.Status) + } + waitKubernetesWorkloadsGone(t, namespace, pvc) + deleted := e2e.RunClient(t, bins, socket, "delete", created.ID) + if !strings.Contains(deleted, `"status": "deleted"`) { + t.Fatalf("delete response = %s, want deleted status", deleted) + } + waitKubernetesServicesGone(t, namespace, pvc) +} + +func requireKubernetes(t *testing.T) { + t.Helper() + e2e.RequireCommand(t, "kubectl") + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + cmd := exec.CommandContext(ctx, "kubectl", "cluster-info") + if out, err := cmd.CombinedOutput(); err != nil { + if os.Getenv("CI") == "" { + t.Skipf("Kubernetes cluster is required for this integration test: %v\n%s", err, out) + } + t.Fatalf("Kubernetes cluster is required for this integration test: %v\n%s", err, out) + } +} + +func seedPVC(t *testing.T, namespace string, pvc string, fixtureDir string) { + t.Helper() + applyManifest(t, fmt.Sprintf(`apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: %s + namespace: %s +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +--- +apiVersion: v1 +kind: Pod +metadata: + name: seed-%s + namespace: %s +spec: + restartPolicy: Never + containers: + - name: seed + image: busybox:1.36 + command: ["sh", "-c", "sleep 3600"] + volumeMounts: + - name: runtime + mountPath: /runtime + volumes: + - name: runtime + persistentVolumeClaim: + claimName: %s +`, pvc, namespace, pvc, namespace, pvc)) + seedPod := "seed-" + pvc + e2e.Run(t, "kubectl", "wait", "-n", namespace, "--for=condition=Ready", "pod/"+seedPod, "--timeout=180s") + e2e.Run(t, "kubectl", "cp", filepath.Join(fixtureDir, "scroll.yaml"), namespace+"/"+seedPod+":/runtime/scroll.yaml") + e2e.Run(t, "kubectl", "exec", "-n", namespace, seedPod, "--", "sh", "-c", "mkdir -p /runtime/data/public") + 
e2e.Run(t, "kubectl", "delete", "pod", "-n", namespace, seedPod, "--wait=true") +} + +func applyManifest(t *testing.T, manifest string) { + t.Helper() + path := filepath.Join(t.TempDir(), "manifest.yaml") + if err := os.WriteFile(path, []byte(manifest), 0644); err != nil { + t.Fatal(err) + } + e2e.Run(t, "kubectl", "apply", "-f", path) +} + +func writeCurrentKubeconfig(t *testing.T) string { + t.Helper() + path := filepath.Join(t.TempDir(), "kubeconfig") + data := e2e.Run(t, "kubectl", "config", "view", "--raw", "--minify", "--flatten") + if err := os.WriteFile(path, []byte(data), 0600); err != nil { + t.Fatal(err) + } + return path +} + +func findTarget(t *testing.T, targets []e2e.RuntimeRoutingTarget, fixture e2e.Fixture) e2e.RuntimeRoutingTarget { + t.Helper() + for _, target := range targets { + if target.PortName == "http" && target.Procedure == fixture.ServeProc { + return target + } + } + t.Fatalf("http target for %s not found in %#v", fixture.ServeProc, targets) + return e2e.RuntimeRoutingTarget{} +} + +func startPortForward(t *testing.T, namespace string, service string, localPort int, remotePort int) func() { + t.Helper() + ctx, cancel := context.WithCancel(context.Background()) + cmd := exec.CommandContext(ctx, "kubectl", "port-forward", "-n", namespace, "svc/"+service, fmt.Sprintf("%d:%d", localPort, remotePort)) + var logs e2e.LockedBuffer + cmd.Stdout = &logs + cmd.Stderr = &logs + if err := cmd.Start(); err != nil { + cancel() + t.Fatalf("start kubectl port-forward: %v", err) + } + deadline := time.Now().Add(30 * time.Second) + for time.Now().Before(deadline) { + if strings.Contains(logs.String(), "Forwarding from") { + return func() { + cancel() + _ = cmd.Wait() + } + } + time.Sleep(100 * time.Millisecond) + } + cancel() + _ = cmd.Wait() + t.Fatalf("kubectl port-forward did not become ready:\n%s", logs.String()) + return func() {} +} + +func waitServiceExists(t *testing.T, namespace string, service string) { + t.Helper() + deadline := 
time.Now().Add(60 * time.Second) + for time.Now().Before(deadline) { + if _, err := kubectlOutput("get", "service", "-n", namespace, service); err == nil { + return + } + time.Sleep(500 * time.Millisecond) + } + out, _ := kubectlOutput("get", "service", "-n", namespace, "-o", "name") + t.Fatalf("service %s did not appear; services:\n%s", service, out) +} + +func waitRuntimePodReady(t *testing.T, namespace string, pvc string) { + t.Helper() + selector := "app.kubernetes.io/managed-by=druid,druid.gg/scroll-id=" + pvc + deadline := time.Now().Add(3 * time.Minute) + for time.Now().Before(deadline) { + out, err := kubectlOutput("get", "pod", "-n", namespace, "-l", selector, "-o", "jsonpath={.items[0].status.phase}") + if err == nil && strings.TrimSpace(out) == "Running" { + if _, waitErr := kubectlOutput("wait", "-n", namespace, "--for=condition=Ready", "pod", "-l", selector, "--timeout=10s"); waitErr == nil { + return + } + } + time.Sleep(500 * time.Millisecond) + } + out, _ := kubectlOutput("get", "pod", "-n", namespace, "-l", selector, "-o", "wide") + t.Fatalf("runtime pod did not become ready:\n%s", out) +} + +func assertKubernetesPort(t *testing.T, statuses []e2e.RuntimePortStatus, fixture e2e.Fixture) { + t.Helper() + for _, status := range statuses { + if status.Name == "http" && status.Procedure == fixture.ServeProc { + if !status.Bound { + t.Fatalf("port status = %#v, want bound", status) + } + if status.Port != fixture.Port { + t.Fatalf("service port = %d, want %d in status %#v", status.Port, fixture.Port, status) + } + return + } + } + t.Fatalf("http port for %s not found in %#v", fixture.ServeProc, statuses) +} + +func readPVCFile(t *testing.T, namespace string, pvc string, relativePath string) string { + t.Helper() + pod := "read-" + strings.ReplaceAll(relativePath, "/", "-") + "-" + fmt.Sprintf("%x", time.Now().UnixNano())[:8] + applyManifest(t, fmt.Sprintf(`apiVersion: v1 +kind: Pod +metadata: + name: %s + namespace: %s +spec: + restartPolicy: Never + 
containers: + - name: read + image: busybox:1.36 + command: ["sh", "-c", "cat /runtime/%s"] + volumeMounts: + - name: runtime + mountPath: /runtime + volumes: + - name: runtime + persistentVolumeClaim: + claimName: %s +`, pod, namespace, relativePath, pvc)) + defer e2e.Run(t, "kubectl", "delete", "pod", "-n", namespace, pod, "--ignore-not-found=true", "--wait=false") + waitPodSucceeded(t, namespace, pod) + return e2e.Run(t, "kubectl", "logs", "-n", namespace, pod) +} + +func waitPodSucceeded(t *testing.T, namespace string, pod string) { + t.Helper() + deadline := time.Now().Add(2 * time.Minute) + for time.Now().Before(deadline) { + out, err := kubectlOutput("get", "pod", "-n", namespace, pod, "-o", "jsonpath={.status.phase}") + phase := strings.TrimSpace(out) + if err == nil && phase == "Succeeded" { + return + } + if err == nil && phase == "Failed" { + logs, _ := kubectlOutput("logs", "-n", namespace, pod) + t.Fatalf("pod %s failed:\n%s", pod, logs) + } + time.Sleep(500 * time.Millisecond) + } + logs, _ := kubectlOutput("logs", "-n", namespace, pod) + t.Fatalf("pod %s did not succeed:\n%s", pod, logs) +} + +func kubectlOutput(args ...string) (string, error) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + cmd := exec.CommandContext(ctx, "kubectl", args...) 
+ out, err := cmd.CombinedOutput() + return string(out), err +} + +func waitKubernetesWorkloadsGone(t *testing.T, namespace string, pvc string) { + t.Helper() + waitKubernetesResourcesGone(t, namespace, pvc, "statefulset,job,pod") +} + +func waitKubernetesServicesGone(t *testing.T, namespace string, pvc string) { + t.Helper() + waitKubernetesResourcesGone(t, namespace, pvc, "service") +} + +func waitKubernetesResourcesGone(t *testing.T, namespace string, pvc string, resource string) { + t.Helper() + selector := "app.kubernetes.io/managed-by=druid,druid.gg/scroll-id=" + pvc + deadline := time.Now().Add(60 * time.Second) + for time.Now().Before(deadline) { + out := e2e.Run(t, "kubectl", "get", resource, "-n", namespace, "-l", selector, "-o", "name", "--ignore-not-found=true") + if strings.TrimSpace(out) == "" { + return + } + time.Sleep(500 * time.Millisecond) + } + out := e2e.Run(t, "kubectl", "get", resource, "-n", namespace, "-l", selector, "-o", "name", "--ignore-not-found=true") + t.Fatalf("kubernetes %s still exist:\n%s", resource, out) +} From aaac0df63dc9423533eac408660489a122367440 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc=20Schottst=C3=A4dt?= Date: Sun, 10 May 2026 02:33:38 +0200 Subject: [PATCH 3/6] chore: trim unused runtime helpers --- apps/druid/adapters/cli/output.go | 15 ---------- internal/runtime/docker/backend.go | 8 ----- internal/runtime/kubernetes/backend.go | 8 ----- test/integration/docker/docker_cli_test.go | 12 ++------ test/integration/internal/e2e/harness.go | 30 +++++++------------ .../kubernetes/kubernetes_cli_test.go | 15 ++-------- 6 files changed, 16 insertions(+), 72 deletions(-) delete mode 100644 apps/druid/adapters/cli/output.go diff --git a/apps/druid/adapters/cli/output.go b/apps/druid/adapters/cli/output.go deleted file mode 100644 index 3bdd634c..00000000 --- a/apps/druid/adapters/cli/output.go +++ /dev/null @@ -1,15 +0,0 @@ -package cli - -import ( - "encoding/json" - "fmt" -) - -func printJSON(v interface{}) error { - 
data, err := json.MarshalIndent(v, "", " ") - if err != nil { - return err - } - fmt.Println(string(data)) - return nil -} diff --git a/internal/runtime/docker/backend.go b/internal/runtime/docker/backend.go index 7824b983..801b1738 100644 --- a/internal/runtime/docker/backend.go +++ b/internal/runtime/docker/backend.go @@ -520,14 +520,6 @@ func dataRootHash(dataRoot string) string { return hex.EncodeToString(hash[:])[:10] } -func commandProcedureName(commandName string, idx int, procedure *domain.Procedure) string { - procedureName := fmt.Sprintf("%s.%d", commandName, idx) - if procedure != nil && procedure.Id != nil { - procedureName = *procedure.Id - } - return procedureName -} - func runtimeConsoleID(scrollID string, procedureName string) string { if scrollID == "" { return procedureName diff --git a/internal/runtime/kubernetes/backend.go b/internal/runtime/kubernetes/backend.go index 69d471ff..709fc25a 100644 --- a/internal/runtime/kubernetes/backend.go +++ b/internal/runtime/kubernetes/backend.go @@ -961,14 +961,6 @@ func portsByName(ports []domain.Port) map[string]domain.Port { return result } -func commandProcedureName(commandName string, idx int, procedure *domain.Procedure) string { - procedureName := fmt.Sprintf("%s.%d", commandName, idx) - if procedure != nil && procedure.Id != nil { - procedureName = *procedure.Id - } - return procedureName -} - func runtimeConsoleID(scrollID string, procedureName string) string { if scrollID == "" { return procedureName diff --git a/test/integration/docker/docker_cli_test.go b/test/integration/docker/docker_cli_test.go index 2db4fc1c..7394d8a2 100644 --- a/test/integration/docker/docker_cli_test.go +++ b/test/integration/docker/docker_cli_test.go @@ -53,7 +53,9 @@ func TestDockerBackendCLIComplexLifecycle(t *testing.T) { e2e.RunClient(t, bins, socket, "run", created.ID, "record") dataRoot := filepath.Join(stateDir, "scrolls", created.ID, "data") - assertFileContains(t, filepath.Join(dataRoot, "finite.txt"), 
"finite-ok") + if got := readFile(t, filepath.Join(dataRoot, "finite.txt")); !strings.Contains(got, "finite-ok") { + t.Fatalf("finite file = %q, want finite-ok", got) + } recordEnv := e2e.ParseEnv(readFile(t, filepath.Join(dataRoot, "record-env.txt"))) e2e.AssertRuntimeEnv(t, recordEnv, fixture, "docker", created.ID) if recordEnv["USER_ENV"] != "finite" { @@ -84,14 +86,6 @@ func assertPortBound(t *testing.T, statuses []e2e.RuntimePortStatus, fixture e2e t.Fatalf("http port for %s not found in %#v", fixture.ServeProc, statuses) } -func assertFileContains(t *testing.T, path string, want string) { - t.Helper() - got := readFile(t, path) - if !strings.Contains(got, want) { - t.Fatalf("%s = %q, want to contain %q", path, got, want) - } -} - func readFile(t *testing.T, path string) string { t.Helper() data, err := os.ReadFile(path) diff --git a/test/integration/internal/e2e/harness.go b/test/integration/internal/e2e/harness.go index 5d933a20..d3da8ce8 100644 --- a/test/integration/internal/e2e/harness.go +++ b/test/integration/internal/e2e/harness.go @@ -25,24 +25,20 @@ type Binaries struct { } type Fixture struct { - Dir string - Name string - ServeProc string - RecordProc string - Port int - RouteHost string - RouteURL string - RoutePort int - RoutingFile string - Assignment string - ScrollRootRef string + Dir string + Name string + ServeProc string + RecordProc string + Port int + RouteHost string + RouteURL string + RoutePort int + RoutingFile string } type RuntimeScroll struct { - ID string `json:"id"` - DataRoot string `json:"data_root"` - ScrollName string `json:"scroll_name"` - Status string `json:"status"` + ID string `json:"id"` + Status string `json:"status"` } type RuntimePortStatus struct { @@ -51,14 +47,11 @@ type RuntimePortStatus struct { Port int `json:"port"` Bound bool `json:"bound"` HostPort int `json:"host_port"` - Source string `json:"source"` } type RuntimeRoutingTarget struct { - Name string `json:"name"` Procedure string `json:"procedure"` 
PortName string `json:"port_name"` - Port int `json:"port"` Namespace string `json:"namespace"` ServiceName string `json:"service_name"` ServicePort int `json:"service_port"` @@ -289,7 +282,6 @@ commands: RouteURL: routeURL, RoutePort: routePort, RoutingFile: routingFile, - Assignment: serveProc + "-http", } } diff --git a/test/integration/kubernetes/kubernetes_cli_test.go b/test/integration/kubernetes/kubernetes_cli_test.go index ba5b54ff..3b6b1384 100644 --- a/test/integration/kubernetes/kubernetes_cli_test.go +++ b/test/integration/kubernetes/kubernetes_cli_test.go @@ -26,7 +26,6 @@ func TestKubernetesBackendCLIComplexLifecycle(t *testing.T) { ref := fmt.Sprintf("k8s://%s/%s", namespace, pvc) name := "k8s-cli-" + suffix fixture := e2e.WriteFixture(t, filepath.Join(t.TempDir(), "scroll"), name, port, routePort) - fixture.ScrollRootRef = ref e2e.Run(t, "kubectl", "create", "namespace", namespace) t.Cleanup(func() { @@ -94,12 +93,12 @@ func TestKubernetesBackendCLIComplexLifecycle(t *testing.T) { if stopped.Status != "stopped" { t.Fatalf("stopped status = %s, want stopped", stopped.Status) } - waitKubernetesWorkloadsGone(t, namespace, pvc) + waitKubernetesResourcesGone(t, namespace, pvc, "statefulset,job,pod") deleted := e2e.RunClient(t, bins, socket, "delete", created.ID) if !strings.Contains(deleted, `"status": "deleted"`) { t.Fatalf("delete response = %s, want deleted status", deleted) } - waitKubernetesServicesGone(t, namespace, pvc) + waitKubernetesResourcesGone(t, namespace, pvc, "service") } func requireKubernetes(t *testing.T) { @@ -313,16 +312,6 @@ func kubectlOutput(args ...string) (string, error) { return string(out), err } -func waitKubernetesWorkloadsGone(t *testing.T, namespace string, pvc string) { - t.Helper() - waitKubernetesResourcesGone(t, namespace, pvc, "statefulset,job,pod") -} - -func waitKubernetesServicesGone(t *testing.T, namespace string, pvc string) { - t.Helper() - waitKubernetesResourcesGone(t, namespace, pvc, "service") -} - func 
waitKubernetesResourcesGone(t *testing.T, namespace string, pvc string, resource string) { t.Helper() selector := "app.kubernetes.io/managed-by=druid,druid.gg/scroll-id=" + pvc From aa519e7bfd78874d22bdc137de27ae20b917b2e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc=20Schottst=C3=A4dt?= Date: Thu, 14 May 2026 21:27:29 +0200 Subject: [PATCH 4/6] chore: sloppy draft --- .docker/entrypoint.sh | 14 +- .github/workflows/build.yml | 2 - .github/workflows/docker-builds.yml | 71 -- .github/workflows/pr.yml | 2 - .vscode/launch.json | 87 +- .vscode/tasks.json | 17 + CONTEXT.md | 120 +- Dockerfile | 5 +- Dockerfile.coldstarter | 10 - Dockerfile.steamcmd | 15 +- Makefile | 18 +- README.md | 26 +- api/callback-oapi-codegen.yaml | 7 + api/callback.openapi.yaml | 51 + api/dev-oapi-codegen.yaml | 7 + api/dev.openapi.yaml | 112 ++ api/openapi.yaml | 51 +- apps/druid-client/adapters/cli/attach.go | 17 - apps/druid-client/adapters/cli/create.go | 117 -- apps/druid-client/adapters/cli/delete.go | 22 - apps/druid-client/adapters/cli/describe.go | 22 - apps/druid-client/adapters/cli/lifecycle.go | 41 - apps/druid-client/adapters/cli/list.go | 22 - apps/druid-client/adapters/cli/login.go | 71 -- apps/druid-client/adapters/cli/ports.go | 22 - apps/druid-client/adapters/cli/pull.go | 36 - apps/druid-client/adapters/cli/push.go | 129 -- .../adapters/cli/push_category.go | 43 - apps/druid-client/adapters/cli/register.go | 48 - apps/druid-client/adapters/cli/root.go | 94 -- apps/druid-client/adapters/cli/root_test.go | 52 - apps/druid-client/adapters/cli/routing.go | 95 -- apps/druid-client/adapters/cli/run.go | 22 - .../druid-client/core/ports/runtime_daemon.go | 24 - .../core/services/runtime_service.go | 56 - apps/druid-client/main.go | 15 - apps/druid-coldstarter/adapters/cli/root.go | 25 +- .../adapters/cli/root_test.go | 32 + .../adapters/filesystem/status_writer.go | 4 +- .../core/ports/status_writer.go | 2 +- .../core/services/coldstarter.go | 48 +- 
.../core/services/coldstarter_test.go | 78 ++ apps/druid/adapters/cli/app_version.go | 129 -- apps/druid/adapters/cli/attach.go | 19 + apps/druid/adapters/cli/callback.go | 33 + apps/druid/adapters/cli/client/create.go | 52 + apps/druid/adapters/cli/client/delete.go | 20 + apps/druid/adapters/cli/client/describe.go | 20 + apps/druid/adapters/cli/client/dev.go | 419 +++++++ apps/druid/adapters/cli/client/dev_test.go | 211 ++++ apps/druid/adapters/cli/client/list.go | 20 + .../adapters/cli/client}/output.go | 2 +- apps/druid/adapters/cli/client/ports.go | 20 + apps/druid/adapters/cli/client/register.go | 76 ++ apps/druid/adapters/cli/client/routing.go | 8 + .../adapters/cli/client/routing_apply.go | 100 ++ .../adapters/cli/client/routing_publish.go | 20 + .../cli/client/routing_publish_test.go | 197 +++ .../adapters/cli/client/routing_targets.go | 20 + apps/druid/adapters/cli/client/run.go | 20 + apps/druid/adapters/cli/client/start.go | 20 + apps/druid/adapters/cli/client/stop.go | 20 + apps/druid/adapters/cli/daemon.go | 210 ++++ apps/druid/adapters/cli/login.go | 71 ++ apps/druid/adapters/cli/pull.go | 38 + apps/druid/adapters/cli/push.go | 115 ++ apps/druid/adapters/cli/push_category.go | 44 + apps/druid/adapters/cli/root.go | 24 +- apps/druid/adapters/cli/root_test.go | 37 +- apps/druid/adapters/cli/runtime_client.go | 21 + apps/druid/adapters/cli/serve.go | 151 --- apps/druid/adapters/cli/update.go | 24 +- apps/druid/adapters/cli/validate.go | 1 + apps/druid/adapters/cli/version.go | 4 + apps/druid/adapters/cli/worker.go | 12 + apps/druid/adapters/cli/worker_pull.go | 320 +++++ apps/druid/adapters/cli/worker_push.go | 42 + apps/druid/adapters/cli/worker_test.go | 114 ++ .../adapters/daemonclient}/openapi_client.go | 101 +- .../daemonclient}/openapi_client_test.go | 16 +- apps/druid/adapters/http/handlers/auth.go | 71 ++ .../adapters/http/handlers/dev_handler.go | 120 ++ apps/druid/adapters/http/handlers/routes.go | 27 +- 
.../adapters/http/handlers/routes_test.go | 27 +- .../adapters/http/handlers/scroll_handler.go | 140 +-- .../http/handlers/scroll_handler_test.go | 30 - .../http/handlers/websocket_handler.go | 15 + .../adapters/websocketclient}/attacher.go | 2 +- apps/druid/core/services/runtime_access.go | 90 ++ .../druid/core/services/runtime_controller.go | 919 -------------- .../core/services/runtime_controller_test.go | 404 ------ apps/druid/core/services/runtime_dev.go | 156 +++ apps/druid/core/services/runtime_lifecycle.go | 56 + .../core/services/runtime_materialization.go | 84 ++ apps/druid/core/services/runtime_session.go | 93 ++ .../core/services/runtime_session_cache.go | 78 ++ .../core/services/runtime_session_commands.go | 188 +++ .../core/services/runtime_session_runtime.go | 122 ++ apps/druid/core/services/runtime_status.go | 36 + .../druid/core/services/runtime_supervisor.go | 224 ++++ .../core/services/runtime_supervisor_test.go | 616 +++++++++ apps/druid/core/services/runtime_update.go | 73 ++ apps/druid/core/services/worker_callbacks.go | 65 + .../core/services/worker_callbacks_test.go | 55 + config/helm-charts/druid-cli/chart_test.go | 13 +- .../druid-cli/templates/deployment.yaml | 14 +- .../druid-cli/templates/networkpolicy.yaml | 2 + .../helm-charts/druid-cli/templates/rbac.yaml | 3 + .../druid-cli/templates/service.yaml | 4 + config/helm-charts/druid-cli/values.yaml | 6 +- docs_md/main.go | 2 - examples/README.md | 16 +- examples/minecraft/scroll.yaml | 10 +- go.mod | 3 +- go.sum | 6 +- internal/api/dev.go | 17 + internal/api/generated.go | 118 +- internal/callbackapi/generated.go | 430 +++++++ internal/core/domain/broadcast_channel.go | 3 + internal/core/domain/oci.go | 17 +- internal/core/domain/queue_item.go | 11 +- internal/core/domain/registry.go | 6 +- internal/core/domain/runtime.go | 35 - internal/core/domain/runtime_scroll.go | 26 +- internal/core/domain/scroll.go | 34 +- internal/core/domain/scroll_test.go | 8 +- 
internal/core/ports/services_ports.go | 104 +- internal/core/services/authorizer_service.go | 157 ++- internal/core/services/coldstarter.go | 163 +-- .../coldstarter/handler/lua_handler.go | 2 +- internal/core/services/cron_manager.go | 50 - internal/core/services/procedure_launcher.go | 22 +- .../core/services/procedure_launcher_test.go | 26 +- internal/core/services/queue_manager.go | 53 +- .../services/registry/credential_store.go | 6 + internal/core/services/registry/oci.go | 114 +- internal/core/services/registry/oci_test.go | 39 + internal/core/services/runtime_env.go | 10 + .../core/services/runtime_scroll_manager.go | 136 +- .../services/runtime_scroll_manager_test.go | 10 +- internal/core/services/runtime_state_store.go | 81 +- .../core/services/runtime_state_store_test.go | 79 +- internal/core/services/watch_service.go | 50 +- internal/core/services/watch_service_test.go | 83 +- internal/devapi/generated.go | 1117 +++++++++++++++++ internal/routing/publish.go | 92 ++ internal/routing/publish_test.go | 74 ++ internal/runtime/backend.go | 75 +- internal/runtime/backend_factory_test.go | 170 +++ internal/runtime/docker/backend.go | 737 +++++++++-- internal/runtime/docker/storage.go | 135 ++ internal/runtime/docker/storage_test.go | 109 ++ internal/runtime/kubernetes/backend.go | 394 ++++-- internal/runtime/kubernetes/hubble.go | 19 +- internal/runtime/kubernetes/names.go | 22 +- internal/runtime/kubernetes/resources.go | 199 ++- internal/runtime/kubernetes/resources_test.go | 246 +++- internal/runtime/kubernetes/state_store.go | 84 +- .../runtime/kubernetes/state_store_test.go | 15 +- internal/runtime/runtime_test.go | 127 +- internal/utils/random.go | 2 +- scripts/build_coldstarter_image.sh | 16 - test/integration/docker/docker_cli_test.go | 235 +++- test/integration/example_test.go | 5 +- test/integration/internal/e2e/harness.go | 199 ++- .../kubernetes/kubernetes_cli_test.go | 114 +- test/mock/services.go | 276 ++-- 167 files changed, 10135 insertions(+), 
4510 deletions(-) create mode 100644 .vscode/tasks.json delete mode 100644 Dockerfile.coldstarter create mode 100644 api/callback-oapi-codegen.yaml create mode 100644 api/callback.openapi.yaml create mode 100644 api/dev-oapi-codegen.yaml create mode 100644 api/dev.openapi.yaml delete mode 100644 apps/druid-client/adapters/cli/attach.go delete mode 100644 apps/druid-client/adapters/cli/create.go delete mode 100644 apps/druid-client/adapters/cli/delete.go delete mode 100644 apps/druid-client/adapters/cli/describe.go delete mode 100644 apps/druid-client/adapters/cli/lifecycle.go delete mode 100644 apps/druid-client/adapters/cli/list.go delete mode 100644 apps/druid-client/adapters/cli/login.go delete mode 100644 apps/druid-client/adapters/cli/ports.go delete mode 100644 apps/druid-client/adapters/cli/pull.go delete mode 100644 apps/druid-client/adapters/cli/push.go delete mode 100644 apps/druid-client/adapters/cli/push_category.go delete mode 100644 apps/druid-client/adapters/cli/register.go delete mode 100644 apps/druid-client/adapters/cli/root.go delete mode 100644 apps/druid-client/adapters/cli/root_test.go delete mode 100644 apps/druid-client/adapters/cli/routing.go delete mode 100644 apps/druid-client/adapters/cli/run.go delete mode 100644 apps/druid-client/core/ports/runtime_daemon.go delete mode 100644 apps/druid-client/core/services/runtime_service.go delete mode 100644 apps/druid-client/main.go create mode 100644 apps/druid-coldstarter/adapters/cli/root_test.go create mode 100644 apps/druid-coldstarter/core/services/coldstarter_test.go delete mode 100644 apps/druid/adapters/cli/app_version.go create mode 100644 apps/druid/adapters/cli/attach.go create mode 100644 apps/druid/adapters/cli/callback.go create mode 100644 apps/druid/adapters/cli/client/create.go create mode 100644 apps/druid/adapters/cli/client/delete.go create mode 100644 apps/druid/adapters/cli/client/describe.go create mode 100644 apps/druid/adapters/cli/client/dev.go create mode 100644 
apps/druid/adapters/cli/client/dev_test.go create mode 100644 apps/druid/adapters/cli/client/list.go rename apps/{druid-client/adapters/cli => druid/adapters/cli/client}/output.go (97%) create mode 100644 apps/druid/adapters/cli/client/ports.go create mode 100644 apps/druid/adapters/cli/client/register.go create mode 100644 apps/druid/adapters/cli/client/routing.go create mode 100644 apps/druid/adapters/cli/client/routing_apply.go create mode 100644 apps/druid/adapters/cli/client/routing_publish.go create mode 100644 apps/druid/adapters/cli/client/routing_publish_test.go create mode 100644 apps/druid/adapters/cli/client/routing_targets.go create mode 100644 apps/druid/adapters/cli/client/run.go create mode 100644 apps/druid/adapters/cli/client/start.go create mode 100644 apps/druid/adapters/cli/client/stop.go create mode 100644 apps/druid/adapters/cli/daemon.go create mode 100644 apps/druid/adapters/cli/login.go create mode 100644 apps/druid/adapters/cli/pull.go create mode 100644 apps/druid/adapters/cli/push.go create mode 100644 apps/druid/adapters/cli/push_category.go create mode 100644 apps/druid/adapters/cli/runtime_client.go delete mode 100644 apps/druid/adapters/cli/serve.go create mode 100644 apps/druid/adapters/cli/worker.go create mode 100644 apps/druid/adapters/cli/worker_pull.go create mode 100644 apps/druid/adapters/cli/worker_push.go create mode 100644 apps/druid/adapters/cli/worker_test.go rename apps/{druid-client/adapters/daemon => druid/adapters/daemonclient}/openapi_client.go (62%) rename apps/{druid-client/adapters/daemon => druid/adapters/daemonclient}/openapi_client_test.go (66%) create mode 100644 apps/druid/adapters/http/handlers/auth.go create mode 100644 apps/druid/adapters/http/handlers/dev_handler.go delete mode 100644 apps/druid/adapters/http/handlers/scroll_handler_test.go rename apps/{druid-client/adapters/websocket => druid/adapters/websocketclient}/attacher.go (98%) create mode 100644 apps/druid/core/services/runtime_access.go 
delete mode 100644 apps/druid/core/services/runtime_controller.go delete mode 100644 apps/druid/core/services/runtime_controller_test.go create mode 100644 apps/druid/core/services/runtime_dev.go create mode 100644 apps/druid/core/services/runtime_lifecycle.go create mode 100644 apps/druid/core/services/runtime_materialization.go create mode 100644 apps/druid/core/services/runtime_session.go create mode 100644 apps/druid/core/services/runtime_session_cache.go create mode 100644 apps/druid/core/services/runtime_session_commands.go create mode 100644 apps/druid/core/services/runtime_session_runtime.go create mode 100644 apps/druid/core/services/runtime_status.go create mode 100644 apps/druid/core/services/runtime_supervisor.go create mode 100644 apps/druid/core/services/runtime_supervisor_test.go create mode 100644 apps/druid/core/services/runtime_update.go create mode 100644 apps/druid/core/services/worker_callbacks.go create mode 100644 apps/druid/core/services/worker_callbacks_test.go create mode 100644 internal/api/dev.go create mode 100644 internal/callbackapi/generated.go delete mode 100644 internal/core/services/cron_manager.go create mode 100644 internal/devapi/generated.go create mode 100644 internal/routing/publish.go create mode 100644 internal/routing/publish_test.go create mode 100644 internal/runtime/backend_factory_test.go create mode 100644 internal/runtime/docker/storage.go create mode 100644 internal/runtime/docker/storage_test.go delete mode 100755 scripts/build_coldstarter_image.sh diff --git a/.docker/entrypoint.sh b/.docker/entrypoint.sh index 376d0312..198ade04 100755 --- a/.docker/entrypoint.sh +++ b/.docker/entrypoint.sh @@ -17,21 +17,19 @@ fi echo "Druid Version: $(druid version)" +if [ "$1" = "druid-coldstarter" ] || [ "$1" = "/usr/bin/druid-coldstarter" ]; then + exec "$@" +fi + if [ ! -z "${DRUID_REGISTRY_HOST}" ] && [ ! -z "${DRUID_REGISTRY_USER}" ] && [ ! 
-z "${DRUID_REGISTRY_PASSWORD}" ]; then echo "Logging into registry ${DRUID_REGISTRY_HOST}" druid login --host "${DRUID_REGISTRY_HOST}" -u "${DRUID_REGISTRY_USER}" -p "${DRUID_REGISTRY_PASSWORD}" fi -# Serve as default when no command is provided. +# Daemon is the default container mode when no command is provided. if [ -z "$input" ]; then - args=(serve) - - if [ ! -z "${DRUID_PORT}" ]; - then - args+=("--tcp") - args+=("--port" "${DRUID_PORT}") - fi + args=(daemon) # Reuse global args (cwd/config) for serve as well args+=("${global_args[@]}") diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index d1db12dc..21707f2a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -31,7 +31,6 @@ jobs: name: build-artifacts path: | bin/druid - bin/druid-client bin/druid-coldstarter .docker/entrypoint.sh .docker/druid-install-command.sh @@ -64,7 +63,6 @@ jobs: make_latest: true files: | bin/druid - bin/druid-client bin/druid-coldstarter .docker/entrypoint.sh .docker/druid-install-command.sh diff --git a/.github/workflows/docker-builds.yml b/.github/workflows/docker-builds.yml index 15eea8a8..fbfa5866 100644 --- a/.github/workflows/docker-builds.yml +++ b/.github/workflows/docker-builds.yml @@ -60,77 +60,6 @@ jobs: DRUID_ARTIFACTS_REGISTRY_USERNAME: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_USERNAME }} DRUID_ARTIFACTS_REGISTRY_TOKEN: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_TOKEN }} - docker-coldstarter-amd64: - uses: ./.github/workflows/docker-build-reusable.yml - with: - dockerfile: Dockerfile.coldstarter - runs_on: ubuntu-latest - tags: | - highcard/druid-coldstarter:${{ inputs.version_tag }}-amd64 - artifacts.druid.gg/druid-team/druid-coldstarter:${{ inputs.version_tag }}-amd64 - build_args: | - VERSION=${{ inputs.version_tag }} - secrets: - DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} - DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} - DRUID_ARTIFACTS_REGISTRY_USERNAME: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_USERNAME }} - 
DRUID_ARTIFACTS_REGISTRY_TOKEN: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_TOKEN }} - - docker-coldstarter-arm64: - uses: ./.github/workflows/docker-build-reusable.yml - with: - dockerfile: Dockerfile.coldstarter - runs_on: ubuntu-24.04-arm - tags: | - highcard/druid-coldstarter:${{ inputs.version_tag }}-arm64 - artifacts.druid.gg/druid-team/druid-coldstarter:${{ inputs.version_tag }}-arm64 - build_args: | - VERSION=${{ inputs.version_tag }} - secrets: - DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} - DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} - DRUID_ARTIFACTS_REGISTRY_USERNAME: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_USERNAME }} - DRUID_ARTIFACTS_REGISTRY_TOKEN: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_TOKEN }} - - docker-coldstarter-manifest: - name: Create multi-arch coldstarter manifests - needs: [docker-coldstarter-amd64, docker-coldstarter-arm64] - runs-on: ubuntu-latest - steps: - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Create and push coldstarter manifests - uses: Noelware/docker-manifest-action@v1 - with: - inputs: highcard/druid-coldstarter:${{ inputs.version_tag }}-amd64,highcard/druid-coldstarter:${{ inputs.version_tag }}-arm64 - tags: >- - highcard/druid-coldstarter:${{ inputs.version }}, - highcard/druid-coldstarter:${{ inputs.version_tag }}${{ inputs.is_pr != true && ',highcard/druid-coldstarter:latest,highcard/druid-coldstarter:stable' || '' }} - push: true - - docker-coldstarter-manifest-artifacts: - name: Create multi-arch coldstarter manifests (artifacts registry) - needs: [docker-coldstarter-amd64, docker-coldstarter-arm64] - runs-on: ubuntu-latest - steps: - - name: Login to Artifacts Registry - uses: docker/login-action@v3 - with: - registry: artifacts.druid.gg - username: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_USERNAME }} - password: ${{ secrets.DRUID_ARTIFACTS_REGISTRY_TOKEN }} - - name: Create and push coldstarter 
manifests (artifacts) - uses: Noelware/docker-manifest-action@v1 - with: - inputs: artifacts.druid.gg/druid-team/druid-coldstarter:${{ inputs.version_tag }}-amd64,artifacts.druid.gg/druid-team/druid-coldstarter:${{ inputs.version_tag }}-arm64 - tags: >- - artifacts.druid.gg/druid-team/druid-coldstarter:${{ inputs.version }}, - artifacts.druid.gg/druid-team/druid-coldstarter:${{ inputs.version_tag }}${{ inputs.is_pr != true && ',artifacts.druid.gg/druid-team/druid-coldstarter:latest,artifacts.druid.gg/druid-team/druid-coldstarter:stable' || '' }} - push: true - docker-base-manifest: name: Create multi-arch base manifests needs: [docker-base-amd64, docker-base-arm64] diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index 1173de7e..d6e62afd 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -48,7 +48,6 @@ jobs: name: build-artifacts path: | bin/druid - bin/druid-client bin/druid-coldstarter .docker/entrypoint.sh .docker/druid-install-command.sh @@ -75,7 +74,6 @@ jobs: prerelease: true files: | bin/druid - bin/druid-client bin/druid-coldstarter .docker/entrypoint.sh .docker/druid-install-command.sh diff --git a/.vscode/launch.json b/.vscode/launch.json index 15496181..078b2f42 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -2,20 +2,36 @@ "version": "0.2.0", "configurations": [ { - "name": "druid: serve", + "name": "druid: daemon", "type": "go", "request": "launch", "mode": "debug", "console": "integratedTerminal", "program": "${workspaceFolder}/apps/druid", + "preLaunchTask": "k3d: build/import druid-cli pull image", + "env": { + "DRUID_K8S_PULL_IMAGE": "druid-cli:local" + }, "args": [ - "serve", + "daemon", "--socket", "/tmp/druid-vscode-runtime.sock", + "--listen", + "127.0.0.1:8081", + "--public-listen", + "127.0.0.1:8082", + "--worker-callback-listen", + "0.0.0.0:8083", + "--worker-callback-url", + "http://host.docker.internal:8083", "--state-dir", "${workspaceFolder}/.runtime-state", "--runtime", - 
"${input:runtimeBackend}" + "${input:runtimeBackend}", + "--k8s-kubeconfig", + "${env:HOME}/.kube/config", + "--k8s-namespace", + "default" ] }, { @@ -69,12 +85,12 @@ ] }, { - "name": "druid-client: login", + "name": "druid: login", "type": "go", "request": "launch", "mode": "debug", "console": "integratedTerminal", - "program": "${workspaceFolder}/apps/druid-client", + "program": "${workspaceFolder}/apps/druid", "cwd": "${workspaceFolder}", "args": [ "login", @@ -87,12 +103,12 @@ ] }, { - "name": "druid-client: pull", + "name": "druid: pull", "type": "go", "request": "launch", "mode": "debug", "console": "integratedTerminal", - "program": "${workspaceFolder}/apps/druid-client", + "program": "${workspaceFolder}/apps/druid", "args": [ "pull", "${input:artifactRef}", @@ -100,12 +116,12 @@ ] }, { - "name": "druid-client: push", + "name": "druid: push", "type": "go", "request": "launch", "mode": "debug", "console": "integratedTerminal", - "program": "${workspaceFolder}/apps/druid-client", + "program": "${workspaceFolder}/apps/druid", "args": [ "push", "${input:artifactRef}", @@ -113,12 +129,12 @@ ] }, { - "name": "druid-client: push category", + "name": "druid: push category", "type": "go", "request": "launch", "mode": "debug", "console": "integratedTerminal", - "program": "${workspaceFolder}/apps/druid-client", + "program": "${workspaceFolder}/apps/druid", "args": [ "push", "category", @@ -128,43 +144,45 @@ ] }, { - "name": "druid-client: create", + "name": "druid: create", "type": "go", "request": "launch", "mode": "debug", "console": "integratedTerminal", - "program": "${workspaceFolder}/apps/druid-client", + "program": "${workspaceFolder}/apps/druid", "cwd": "${workspaceFolder}", "args": [ "--daemon-socket", "/tmp/druid-vscode-runtime.sock", "create", - "--state-dir", - "${workspaceFolder}/.runtime-state", "${input:artifactPath}" ] }, { - "name": "druid-client: register", + "name": "druid: dev", "type": "go", "request": "launch", "mode": "debug", "console": 
"integratedTerminal", - "program": "${workspaceFolder}/apps/druid-client", + "program": "${workspaceFolder}/apps/druid", "args": [ "--daemon-socket", "/tmp/druid-vscode-runtime.sock", - "register", - "${workspaceFolder}/${input:scrollPath}" + "dev", + "${input:scrollId}", + "--watch", + "data/private/dist", + "--command", + "${input:commandName}" ] }, { - "name": "druid-client: list", + "name": "druid: list", "type": "go", "request": "launch", "mode": "debug", "console": "integratedTerminal", - "program": "${workspaceFolder}/apps/druid-client", + "program": "${workspaceFolder}/apps/druid", "args": [ "--daemon-socket", "/tmp/druid-vscode-runtime.sock", @@ -172,12 +190,12 @@ ] }, { - "name": "druid-client: describe", + "name": "druid: describe", "type": "go", "request": "launch", "mode": "debug", "console": "integratedTerminal", - "program": "${workspaceFolder}/apps/druid-client", + "program": "${workspaceFolder}/apps/druid", "args": [ "--daemon-socket", "/tmp/druid-vscode-runtime.sock", @@ -186,12 +204,12 @@ ] }, { - "name": "druid-client: ports", + "name": "druid: ports", "type": "go", "request": "launch", "mode": "debug", "console": "integratedTerminal", - "program": "${workspaceFolder}/apps/druid-client", + "program": "${workspaceFolder}/apps/druid", "args": [ "--daemon-socket", "/tmp/druid-vscode-runtime.sock", @@ -200,12 +218,12 @@ ] }, { - "name": "druid-client: run", + "name": "druid: run", "type": "go", "request": "launch", "mode": "debug", "console": "integratedTerminal", - "program": "${workspaceFolder}/apps/druid-client", + "program": "${workspaceFolder}/apps/druid", "args": [ "--daemon-socket", "/tmp/druid-vscode-runtime.sock", @@ -215,12 +233,12 @@ ] }, { - "name": "druid-client: delete", + "name": "druid: delete", "type": "go", "request": "launch", "mode": "debug", "console": "integratedTerminal", - "program": "${workspaceFolder}/apps/druid-client", + "program": "${workspaceFolder}/apps/druid", "args": [ "--daemon-socket", 
"/tmp/druid-vscode-runtime.sock", @@ -229,12 +247,12 @@ ] }, { - "name": "druid-client: attach", + "name": "druid: attach", "type": "go", "request": "launch", "mode": "debug", "console": "integratedTerminal", - "program": "${workspaceFolder}/apps/druid-client", + "program": "${workspaceFolder}/apps/druid", "args": [ "--daemon-socket", "/tmp/druid-vscode-runtime.sock", @@ -251,8 +269,8 @@ "console": "integratedTerminal", "program": "${workspaceFolder}/apps/druid-coldstarter", "args": [ - "--runtime-config", - "${workspaceFolder}/${input:scrollPath}/data/.druid/runtime.json", + "--root", + "${workspaceFolder}/${input:scrollPath}", "--status-file", "coldstart/status.json" ] @@ -284,9 +302,10 @@ "type": "pickString", "description": "Runtime backend", "options": [ + "kubernetes", "docker" ], - "default": "docker" + "default": "kubernetes" }, { "id": "scrollPath", diff --git a/.vscode/tasks.json b/.vscode/tasks.json new file mode 100644 index 00000000..a3993e75 --- /dev/null +++ b/.vscode/tasks.json @@ -0,0 +1,17 @@ +{ + "version": "2.0.0", + "tasks": [ + { + "label": "k3d: build/import druid-cli pull image", + "type": "shell", + "command": "make", + "args": [ + "k3d-build-pull-image" + ], + "options": { + "cwd": "${workspaceFolder}" + }, + "problemMatcher": [] + } + ] +} diff --git a/CONTEXT.md b/CONTEXT.md index 1253acb1..a98fa4c1 100644 --- a/CONTEXT.md +++ b/CONTEXT.md @@ -12,7 +12,7 @@ - Split app binaries: - `apps/druid`: daemon + local OCI/validation tooling - - `apps/druid-client`: daemon client CLI + - `apps/druid`: daemon client CLI - `apps/druid-coldstarter`: standalone coldstarter - Docker runtime is the local backend. Kubernetes runtime support works in-cluster or out-of-cluster with kubeconfig and lives under `internal/runtime/kubernetes`. - Runtime concept is always `scrolls`; avoid `instances` terminology. 
@@ -43,33 +43,30 @@ druid login --host -u -p druid create ``` -Runtime daemon and OCI interaction is through `druid-client`: +Runtime daemon and OCI interaction is through `druid`: ```text -druid-client login --host -u -p -druid-client pull [dir] -druid-client push [artifact] [dir] -druid-client push category ... -druid-client create [name] -druid-client register [dir] [name] +druid login --host -u -p +druid pull [dir] +druid push [artifact] [dir] +druid push category ... +druid create [name] ``` ## OCI Ownership -- Flattened OCI commands now live on `druid-client`: - - old `druid registry pull` -> `druid-client pull [dir]` - - old `druid registry push` -> `druid-client push [artifact] [dir]` - - old `druid registry login` -> `druid-client login ...` -- `druid-client pull` keeps current behavior: +- Flattened OCI commands now live on `druid`: + - old `druid registry pull` -> `druid pull [dir]` + - old `druid registry push` -> `druid push [artifact] [dir]` + - old `druid registry login` -> `druid login ...` +- `druid pull` keeps current behavior: - pulls into optional dir or current working directory - includes data by default - `--no-data` skips data files -- `druid-client create` first asks the daemon to materialize: - - Kubernetes daemon creates PVCs and runs a `druid-client pull` Job in-cluster. - - Docker daemon returns materialization unsupported, then client falls back to local materialization into `state/scrolls//spec` and `state/data//data`. - - explicit `--scroll-root`/`--data-root` still materializes directly into those daemon-visible paths. -- `druid-client register [dir] [name]` reports an already checked-out scroll directory without OCI checkout/copying. -- Kubernetes create path: daemon/controller creates PVCs, runs a `druid-client pull` Job, stores runtime scroll state in ConfigMaps, stores opaque `k8s://namespace/pvc` refs there, and runs procedures as Kubernetes Jobs or StatefulSets depending on run mode. 
+- `druid create` first asks the daemon to materialize: + - Kubernetes daemon creates the runtime PVC and runs a `druid worker pull` Job in-cluster. + - Docker daemon uses a worker container for daemon-driven materialization. +- Kubernetes create path: daemon/controller creates PVCs, runs a `druid pull` Job, stores runtime scroll state in ConfigMaps, stores opaque `k8s://namespace/pvc` refs there, and runs procedures as Kubernetes Jobs or StatefulSets depending on run mode. - Docker runtime state stays in local SQLite; Kubernetes runtime state must recover from ConfigMaps, not `state.db`. - Kubernetes daemon auth prefers in-cluster config, then kubeconfig from `--k8s-kubeconfig`, `DRUID_K8S_KUBECONFIG`, `KUBECONFIG`, or `~/.kube/config`. @@ -133,21 +130,19 @@ Runtime state root defaults to `~/.druid/runtime`, or `--state-dir`. Paths: ```text -/scrolls//spec # daemon-owned scroll spec root; contains scroll.yaml -/data//data # runtime data directory; mounted into containers by explicit mounts +/scrolls//scroll.yaml +/scrolls//data ``` Domain: -- `RuntimeScroll.ScrollRoot`: daemon-owned spec root -- `RuntimeScroll.DataRoot`: runtime data root parent -- Runtime config generated at `/data/.druid/runtime.json` +- `RuntimeScroll.Root`: daemon-owned runtime root containing `scroll.yaml`, `data/`, and `.druid/` SQLite store: - `internal/core/services/runtime_state_store.go` - Table: `scrolls` -- `data_root` migration exists. +- Runtime state stores a single `root`. ## Runtime Mount Model @@ -175,20 +170,9 @@ mounts: Docker implementation maps: ```text -/data/ -> +/data/ -> ``` -## Runtime Config - -- Generated by daemon before running commands. -- Location: `/data/.druid/runtime.json` -- Includes: - - scroll id/name/artifact - - runtime backend and generated time - - top-level ports - - expected ports by procedure -- Coldstarter now supports `--runtime-config` and should prefer it over reading `scroll.yaml`. 
- ## Procedure Runtime Model - Executable runtime fields live on procedures, not commands: @@ -206,7 +190,7 @@ Docker implementation maps: - `needs` - `run` - `ProcedureLauncher` no longer owns an OCI registry client. -- Legacy `mode`, `wait`, and `data` procedures are rejected during validation. +- Unsupported `mode`, `wait`, and `data` procedure fields are rejected during validation. ## Expected Ports And Traffic @@ -234,7 +218,7 @@ Docker implementation maps: - Removed Nix/dependency-resolution support. - Removed local runtime backend. - Removed old single-scroll serve mode. -- Removed `druid daemon`, `druid runtime`, `druid runtime serve`, `druid stop`. +- Removed old `druid runtime`, `druid runtime serve`, and single-scroll `druid stop` flows. - Removed old port monitor/watch-port flow. - Removed plugin system files. - Removed legacy handlers/server/middlewares. @@ -245,12 +229,14 @@ Docker implementation maps: - `apps/druid/adapters/cli/serve.go`: daemon startup/listener setup. - `apps/druid/adapters/http/handlers/routes.go`: generated REST registration plus manual `/health` and websocket route. - `apps/druid/adapters/http/handlers/scroll_handler.go`: generated OpenAPI server handler methods for runtime scrolls. -- `apps/druid/core/services/runtime_controller.go`: run command, write runtime config, port status. -- `apps/druid-client/adapters/cli/create.go`: local materialization then daemon registration. -- `apps/druid-client/adapters/cli/pull.go`: client-owned OCI pull. -- `apps/druid-client/adapters/cli/push.go`: client-owned OCI push. -- `apps/druid-client/adapters/cli/login.go`: client-owned registry login. -- `apps/druid-client/adapters/daemon/openapi_client.go`: generated OpenAPI client adapter. +- `apps/druid/core/services/runtime_supervisor.go`: daemon coordinator for persisted runtime truth and sessions. +- `apps/druid/core/services/runtime_session.go`: in-memory scroll execution session. 
+- `apps/druid/core/services/runtime_materialization.go`: daemon materialization path. +- `apps/druid/adapters/cli/create.go`: REST-backed create command. +- `apps/druid/adapters/cli/pull.go`: client-owned OCI pull. +- `apps/druid/adapters/cli/push.go`: client-owned OCI push. +- `apps/druid/adapters/cli/login.go`: client-owned registry login. +- `apps/druid/adapters/daemonclient/openapi_client.go`: generated OpenAPI client adapter. - `internal/core/services/runtime_scroll_manager.go`: `RuntimeScrollManager` and `MaterializeScrollArtifact`. - `internal/core/services/runtime_state_store.go`: SQLite state store. - `internal/runtime/docker/backend.go`: Docker runtime backend. @@ -275,12 +261,11 @@ Also passed local smoke: ```text ./bin/druid serve --socket /runtime.sock --state-dir /state -./bin/druid-client --daemon-socket /runtime.sock create smoke examples/static-web --state-dir /state +./bin/druid --daemon-socket /runtime.sock create smoke examples/static-web verified: - /scrolls/smoke/spec/scroll.yaml exists - /data/smoke/data exists - /data/smoke/data/scroll.yaml does not exist - druid-client describe smoke works + /scrolls/smoke/scroll.yaml exists + /scrolls/smoke/data exists + druid describe smoke works ``` ## Known Warning @@ -295,15 +280,9 @@ This is known and currently non-blocking. ## Important Follow-Ups -- DB-first daemon resume is still conceptual, not implemented: - - daemon startup does not yet restore runners/sessions from `RuntimeScroll.Status` and `RuntimeScroll.Commands`. - - `RunRuntimeScrollCommand` still creates queue machinery per command invocation. - - Need a daemon-owned per-scroll session/controller eventually. -- DB command statuses are not yet persisted on every queue transition. +- Daemon resume hydrates per-scroll sessions from persisted `RuntimeScroll` state; this still needs more end-to-end coverage around long-running commands. +- DB command statuses are persisted through the queue status observer. 
- `scroll-lock.json` still exists in services and queue behavior; DB should become authoritative later. -- `runtime_instance_manager.go` filename still says instance; consider renaming to match `RuntimeScrollManager`. -- `druid-client create` local materialization assumes shared filesystem with daemon unless explicit `--scroll-root` and `--data-root` are passed. -- Kubernetes design still needs proper backend refs instead of local filesystem paths. - Docs generated under `docs_md` are stale/incomplete after command flattening; deleted stale registry/runtime command pages but did not regenerate docs. ## Current Mental Model @@ -312,29 +291,24 @@ Docker/local create: ```text druid serve --runtime docker -druid-client create [name] - -> client materializes OCI/local artifact into runtime state - -> client POSTs generated OpenAPI CreateScrollRequest with scroll_root/data_root - -> daemon reads scroll.yaml through its configured runtime backend - -> daemon caches scroll.yaml in SQLite - -druid-client register [dir] [name] - -> client reports already checked-out dir - -> daemon reads scroll.yaml through its configured runtime backend +druid create [name] + -> client POSTs generated OpenAPI CreateScrollRequest + -> daemon materializes OCI/local artifact into one runtime root + -> daemon reads scroll.yaml from the runtime root -> daemon caches scroll.yaml in SQLite -druid-client run - -> daemon writes runtime config +druid run -> daemon launches Docker procedure containers using explicit data mounts ``` -Runtime is daemon-only: `druid-client create/register/list/describe` do not send, store, or display a per-scroll runtime. +Runtime is daemon-only: `druid create/list/describe` do not send, store, or display a per-scroll runtime. 
Future Kubernetes create: ```text -controller/daemon creates PVC/spec volume -Kubernetes Job runs: druid-client pull [mounted-dir] -daemon/controller registers materialized scroll +daemon creates final PVC/root storage +Kubernetes Job runs: druid worker pull --artifact --root /scroll +worker reports scroll.yaml to daemon callback +daemon validates and persists RuntimeScroll backend creates Jobs/Deployments/StatefulSets from scroll procedures ``` diff --git a/Dockerfile b/Dockerfile index 2df1cffd..7f17abff 100644 --- a/Dockerfile +++ b/Dockerfile @@ -20,7 +20,10 @@ FROM ubuntu:24.04 RUN touch /var/mail/ubuntu && chown ubuntu /var/mail/ubuntu && userdel -r ubuntu RUN apt-get update && apt-get install -y \ - ca-certificates wget\ + ca-certificates \ + jq \ + moreutils \ + wget \ && rm -rf /var/lib/apt/lists/* RUN ARCH=$(uname -m) && \ diff --git a/Dockerfile.coldstarter b/Dockerfile.coldstarter deleted file mode 100644 index a9cdbf74..00000000 --- a/Dockerfile.coldstarter +++ /dev/null @@ -1,10 +0,0 @@ -FROM golang:bullseye AS builder - -ARG VERSION=docker -WORKDIR /src -COPY . . 
-RUN CGO_ENABLED=0 go build -ldflags "-X github.com/highcard-dev/daemon/internal.Version=${VERSION}" -o /out/druid-coldstarter ./apps/druid-coldstarter - -FROM gcr.io/distroless/static-debian12:nonroot -COPY --from=builder /out/druid-coldstarter /usr/bin/druid-coldstarter -ENTRYPOINT ["/usr/bin/druid-coldstarter"] diff --git a/Dockerfile.steamcmd b/Dockerfile.steamcmd index 4a5f02f6..e0971bc7 100644 --- a/Dockerfile.steamcmd +++ b/Dockerfile.steamcmd @@ -8,11 +8,18 @@ COPY --from=base /usr/bin/druid* /usr/bin/ COPY --from=base /entrypoint.sh /entrypoint.sh RUN apt-get update && apt-get install -y \ - ca-certificates wget\ + ca-certificates \ + jq \ + moreutils \ + wget \ && rm -rf /var/lib/apt/lists/* -RUN wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq -RUN chmod +x /usr/bin/yq +RUN ARCH=$(uname -m) && \ + if [ "$ARCH" = "x86_64" ]; then YQ_ARCH="amd64"; \ + elif [ "$ARCH" = "aarch64" ]; then YQ_ARCH="arm64"; \ + else YQ_ARCH="$ARCH"; fi && \ + wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_${YQ_ARCH} -O /usr/bin/yq && \ + chmod +x /usr/bin/yq # Set up user with the same UID/GID ARG UID=1000 @@ -29,4 +36,4 @@ USER druid #ENV LGSM_LOGDIR=/app/resources/deployment/log #ENV LGSM_DATADIR=/app/resources/deployment/data -ENTRYPOINT [ "/entrypoint.sh" ] \ No newline at end of file +ENTRYPOINT [ "/entrypoint.sh" ] diff --git a/Makefile b/Makefile index c60b7ee1..1091bda3 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,8 @@ -.PHONY: test build build-coldstarter-image test-integration test-integration-docker test-integration-kubernetes kind-integration-up kind-integration-down +.PHONY: test build k3d-build-pull-image test-integration test-integration-docker test-integration-kubernetes kind-integration-up kind-integration-down VERSION ?= "dev" -COLDSTARTER_IMAGE ?= druid-coldstarter:local +DRUID_K8S_PULL_IMAGE ?= druid:local +K3D_CLUSTER ?= druid-gs INTEGRATION_TIMEOUT ?= 1200s KIND_CLUSTER ?= 
druid-cli-integration KIND_VERSION ?= v0.27.0 @@ -11,20 +12,25 @@ generate-api: ## Generate API types from OpenAPI spec @echo "Generating API types from OpenAPI spec..." @which oapi-codegen > /dev/null || (echo "Installing oapi-codegen..." && go install github.com/oapi-codegen/oapi-codegen/v2/cmd/oapi-codegen@v2.5.1) @PATH="$(shell go env GOPATH)/bin:$$PATH" oapi-codegen -config api/oapi-codegen.yaml api/openapi.yaml + @PATH="$(shell go env GOPATH)/bin:$$PATH" oapi-codegen -config api/dev-oapi-codegen.yaml api/dev.openapi.yaml + @PATH="$(shell go env GOPATH)/bin:$$PATH" oapi-codegen -config api/callback-oapi-codegen.yaml api/callback.openapi.yaml validate-api: ## Validate OpenAPI spec @echo "Validating OpenAPI spec..." @which oapi-codegen > /dev/null || (echo "Installing oapi-codegen..." && go install github.com/oapi-codegen/oapi-codegen/v2/cmd/oapi-codegen@v2.5.1) @PATH="$(shell go env GOPATH)/bin:$$PATH" oapi-codegen -config api/oapi-codegen.yaml api/openapi.yaml > /dev/null + @PATH="$(shell go env GOPATH)/bin:$$PATH" oapi-codegen -config api/dev-oapi-codegen.yaml api/dev.openapi.yaml > /dev/null + @PATH="$(shell go env GOPATH)/bin:$$PATH" oapi-codegen -config api/callback-oapi-codegen.yaml api/callback.openapi.yaml > /dev/null @echo "✓ OpenAPI spec is valid" -build: generate-api ## Build Daemon and helper binaries +build: generate-api ## Build Druid and helper binaries CGO_ENABLED=0 go build -ldflags "-X github.com/highcard-dev/daemon/internal.Version=$(VERSION)" -o ./bin/druid ./apps/druid - CGO_ENABLED=0 go build -ldflags "-X github.com/highcard-dev/daemon/internal.Version=$(VERSION)" -o ./bin/druid-client ./apps/druid-client CGO_ENABLED=0 go build -ldflags "-X github.com/highcard-dev/daemon/internal.Version=$(VERSION)" -o ./bin/druid-coldstarter ./apps/druid-coldstarter -build-coldstarter-image: ## Build local druid-coldstarter Docker image without pushing - VERSION=$(VERSION) IMAGE=$(COLDSTARTER_IMAGE) ./scripts/build_coldstarter_image.sh 
+k3d-build-pull-image: ## Build the unified Druid runtime image and import it into local k3d. + docker build . -f Dockerfile --build-arg "VERSION=$(VERSION)" -t "$(DRUID_K8S_PULL_IMAGE)" + @docker rm -f "k3d-$(K3D_CLUSTER)-tools" >/dev/null 2>&1 || true + k3d image import "$(DRUID_K8S_PULL_IMAGE)" -c "$(K3D_CLUSTER)" build-x86-docker: docker run -e GOOS=linux -e GOARCH=amd64 -it --rm -v ./:/app -w /app --entrypoint=/bin/bash docker.elastic.co/beats-dev/golang-crossbuild:1.22.5-main -c 'CGO_ENABLED=1 go build -ldflags "-X github.com/highcard-dev/daemon/internal.Version=$(VERSION)" -o ./bin/x86/druid' diff --git a/README.md b/README.md index ec8a590d..f10e3f50 100644 --- a/README.md +++ b/README.md @@ -28,11 +28,10 @@ A Scroll is an OCI Artifact, so it is easy to distribute with registries like Do ### Binaries -This repository builds three isolated binaries: +This repository builds two runtime binaries: -- `apps/druid` -> `bin/druid`: daemon plus local validation/update tooling. -- `apps/druid-client` -> `bin/druid-client`: client-only CLI for daemon API and OCI commands. -- `apps/druid-coldstarter` -> `bin/druid-coldstarter`: standalone coldstart gate binary/image. +- `apps/druid` -> `bin/druid`: daemon, REST-backed CLI, OCI commands, and internal worker mode. +- `apps/druid-coldstarter` -> `bin/druid-coldstarter`: coldstart gate binary included in the runtime image. Build all binaries with: @@ -43,14 +42,13 @@ make build Common local flow: ```bash -druid serve --runtime docker -druid-client login --host -u -p -druid-client pull [dir] -druid-client push [artifact] [dir] -druid-client create [name] -druid-client register [dir] [name] -druid-client run -druid-client describe +druid daemon --runtime docker +druid login --host -u -p +druid pull [dir] +druid push [artifact] [dir] +druid create [name] +druid run +druid describe ``` For examples, omit `[name]` so each scroll derives its own id from `scroll.yaml`. 
@@ -67,9 +65,9 @@ There is also websocket support for stdout. TTY is also supported. ### Runtime backend -Runtime selection is daemon-only: start the daemon with `druid serve --runtime docker`, then use `druid-client` to create, register, run, and inspect scrolls without passing a runtime. Docker runtime state stays in SQLite under the runtime state directory. Scroll specs and runtime data are materialized separately so containers only receive explicit mounts from runtime `data/`. +Runtime selection is daemon-only: start the daemon with `druid daemon --runtime docker`, then use `druid` to create, run, and inspect scrolls without passing a runtime. Docker runtime state stays in SQLite under the runtime state directory. Scroll specs and runtime data live together in one runtime root. -Kubernetes runtime support is available with `druid serve --runtime kubernetes` for in-cluster daemons or out-of-cluster daemons using kubeconfig. It stores daemon scroll state in ConfigMaps, materializes OCI artifacts through cluster Jobs, and uses Cilium/Hubble Relay for port traffic presence. See `docs/kubernetes_runtime.md` for kubeconfig, RBAC, PVC, and Hubble setup. +Kubernetes runtime support is available with `druid daemon --runtime kubernetes` for in-cluster daemons or out-of-cluster daemons using kubeconfig. It stores daemon scroll state in ConfigMaps, materializes OCI artifacts through `druid worker pull` Jobs, and uses Cilium/Hubble Relay for port traffic presence. See `docs/kubernetes_runtime.md` for kubeconfig, RBAC, PVC, and Hubble setup. 
## Documentation diff --git a/api/callback-oapi-codegen.yaml b/api/callback-oapi-codegen.yaml new file mode 100644 index 00000000..4bafcc8b --- /dev/null +++ b/api/callback-oapi-codegen.yaml @@ -0,0 +1,7 @@ +package: callbackapi +output: internal/callbackapi/generated.go +generate: + client: true + fiber-server: true + models: true + embedded-spec: true diff --git a/api/callback.openapi.yaml b/api/callback.openapi.yaml new file mode 100644 index 00000000..cff69de4 --- /dev/null +++ b/api/callback.openapi.yaml @@ -0,0 +1,51 @@ +openapi: 3.1.0 +info: + title: Druid Internal Callback API + version: 0.1.0 + description: Internal callback API used by workers and dev servers. +servers: + - url: / +tags: + - name: worker +paths: + /internal/v1/workers/{runtime_id}/complete: + post: + operationId: completeWorker + tags: [worker] + summary: Complete a pending worker action + parameters: + - $ref: '#/components/parameters/Runtime' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/WorkerResult' + responses: + '204': + description: Worker result accepted + '400': + description: Invalid worker result + '401': + description: Invalid worker token or unknown action +components: + parameters: + Runtime: + name: runtime_id + in: path + required: true + schema: + type: string + schemas: + WorkerResult: + type: object + required: [token] + properties: + token: + type: string + scroll_yaml: + type: string + artifact_digest: + type: string + error: + type: string diff --git a/api/dev-oapi-codegen.yaml b/api/dev-oapi-codegen.yaml new file mode 100644 index 00000000..6986684c --- /dev/null +++ b/api/dev-oapi-codegen.yaml @@ -0,0 +1,7 @@ +package: devapi +output: internal/devapi/generated.go +generate: + client: true + fiber-server: true + models: true + embedded-spec: true diff --git a/api/dev.openapi.yaml b/api/dev.openapi.yaml new file mode 100644 index 00000000..30c9d52b --- /dev/null +++ b/api/dev.openapi.yaml @@ -0,0 +1,112 @@ 
+openapi: 3.1.0 +info: + title: Druid Dev Server + version: 0.1.0 + description: Runtime dev worker API for file access, health, and watch notifications. +servers: + - url: / +tags: + - name: health + - name: files + - name: watch +paths: + /health: + get: + operationId: getHealth + tags: [health] + summary: Check dev server health + responses: + '200': + description: Dev server is healthy + content: + text/plain: + schema: + type: string + example: ok + /api/v1/files: + get: + operationId: getFile + tags: [files] + summary: Read a file from the runtime root + parameters: + - $ref: '#/components/parameters/FilePath' + responses: + '200': + description: File contents + content: + text/plain: + schema: + type: string + application/octet-stream: + schema: + type: string + format: binary + '400': + $ref: '#/components/responses/BadRequest' + '404': + $ref: '#/components/responses/NotFound' + head: + operationId: headFile + tags: [files] + summary: Check if a runtime file exists + parameters: + - $ref: '#/components/parameters/FilePath' + responses: + '200': + description: File exists + '400': + $ref: '#/components/responses/BadRequest' + '404': + $ref: '#/components/responses/NotFound' + put: + operationId: putFile + tags: [files] + summary: Write a file into the runtime root + parameters: + - $ref: '#/components/parameters/FilePath' + requestBody: + required: true + content: + text/plain: + schema: + type: string + application/octet-stream: + schema: + type: string + format: binary + responses: + '204': + description: File written + '400': + $ref: '#/components/responses/BadRequest' + options: + operationId: optionsFile + tags: [files] + summary: Return CORS/WebDAV file access options + parameters: + - $ref: '#/components/parameters/FilePath' + responses: + '204': + description: Options returned + /ws/v1/watch/notify: + get: + operationId: watchNotifications + tags: [watch] + summary: Subscribe to file change and build notifications + responses: + '101': + 
description: WebSocket upgrade accepted +components: + parameters: + FilePath: + name: path + in: query + required: true + schema: + type: string + description: Runtime-root-relative file path, for example data/private/package.json. + responses: + BadRequest: + description: Invalid request + NotFound: + description: File not found diff --git a/api/openapi.yaml b/api/openapi.yaml index 34bc8632..c30800b7 100644 --- a/api/openapi.yaml +++ b/api/openapi.yaml @@ -86,15 +86,13 @@ components: type: string description: OCI artifact reference or local scroll path example: artifacts.druid.gg/test/test:test - scroll_root: - type: string - description: Optional daemon-local path or backend ref containing scroll.yaml and scroll spec files. If omitted, a materializing runtime backend may pull the artifact. - data_root: + owner_id: type: string - description: Optional daemon-local path or backend ref containing runtime data directory. If omitted, a materializing runtime backend may pull the artifact. - start: - type: boolean - default: true + description: Runtime owner id used for customer-facing route authorization. + registry_credentials: + type: array + items: + $ref: '#/components/schemas/RegistryCredential' EnsureScrollRequest: type: object @@ -107,13 +105,29 @@ components: type: string artifact: type: string - scroll_root: + artifact_digest: type: string - data_root: + owner_id: + type: string + description: Runtime owner id used for customer-facing route authorization. 
+ registry_credentials: + type: array + items: + $ref: '#/components/schemas/RegistryCredential' + + RegistryCredential: + type: object + required: + - host + - username + - password + properties: + host: + type: string + username: + type: string + password: type: string - start: - type: boolean - default: true RuntimeRoutingTarget: type: object @@ -184,6 +198,10 @@ components: type: string restart: type: boolean + registry_credentials: + type: array + items: + $ref: '#/components/schemas/RegistryCredential' default: false CommandStatusMap: @@ -201,8 +219,7 @@ components: required: - id - artifact - - scroll_root - - data_root + - root - scroll_name - status - created_at @@ -214,9 +231,7 @@ components: type: string artifact: type: string - scroll_root: - type: string - data_root: + root: type: string scroll_name: type: string diff --git a/apps/druid-client/adapters/cli/attach.go b/apps/druid-client/adapters/cli/attach.go deleted file mode 100644 index 18838a8c..00000000 --- a/apps/druid-client/adapters/cli/attach.go +++ /dev/null @@ -1,17 +0,0 @@ -package cli - -import ( - ws "github.com/highcard-dev/daemon/apps/druid-client/adapters/websocket" - "github.com/spf13/cobra" -) - -func (a *App) attachCmd() *cobra.Command { - return &cobra.Command{ - Use: "attach ", - Short: "Attach to a daemon-managed runtime console", - Args: cobra.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - return ws.NewAttacher(a.daemonSocket).Attach(cmd.Context(), args[0], args[1]) - }, - } -} diff --git a/apps/druid-client/adapters/cli/create.go b/apps/druid-client/adapters/cli/create.go deleted file mode 100644 index 2fdaf54a..00000000 --- a/apps/druid-client/adapters/cli/create.go +++ /dev/null @@ -1,117 +0,0 @@ -package cli - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/highcard-dev/daemon/apps/druid-client/adapters/daemon" - "github.com/highcard-dev/daemon/internal/core/domain" - coreservices 
"github.com/highcard-dev/daemon/internal/core/services" - "github.com/highcard-dev/daemon/internal/core/services/registry" - "github.com/highcard-dev/daemon/internal/utils" - "github.com/spf13/cobra" -) - -func (a *App) createCmd() *cobra.Command { - var stateDir string - var scrollRoot string - var dataRoot string - var noData bool - var noStart bool - cmd := &cobra.Command{ - Use: "create [name]", - Short: "Create a scroll through the daemon", - Args: cobra.RangeArgs(1, 2), - RunE: func(cmd *cobra.Command, args []string) error { - artifact := args[0] - name := "" - if len(args) == 2 { - name = args[1] - } - if stateDir == "" { - defaultStateDir, err := utils.DefaultRuntimeStateDir() - if err != nil { - return err - } - stateDir = defaultStateDir - } - if (scrollRoot == "") != (dataRoot == "") || (scrollRoot != "" && scrollRoot != dataRoot) { - return fmt.Errorf("--scroll-root and --data-root are legacy flags and must be omitted or equal") - } - - service, err := a.runtimeService() - if err != nil { - return err - } - - if scrollRoot != "" { - if strings.Contains(scrollRoot, "://") { - dataRoot = scrollRoot - } else { - if err := coreservices.MaterializeScrollArtifact(artifact, scrollRoot, scrollRoot, registry.NewOciClient(a.loadRegistryStore()), !noData); err != nil { - return err - } - dataRoot = scrollRoot - } - } else { - if !localArtifactExists(artifact) { - scroll, err := service.Create(cmd.Context(), name, artifact, "", "", !noStart) - if err == nil { - return printJSON(scroll) - } - if !errors.Is(err, daemon.ErrMaterializationUnsupported) { - return err - } - } - store := coreservices.NewRuntimeStateStore(stateDir) - tmpParent := filepath.Join(stateDir, "tmp") - if err := os.MkdirAll(tmpParent, 0755); err != nil { - return err - } - tmpDir, err := os.MkdirTemp(tmpParent, "create-scroll-*") - if err != nil { - return err - } - defer os.RemoveAll(tmpDir) - - stagedRoot := filepath.Join(tmpDir, "root") - if err := 
coreservices.MaterializeScrollArtifact(artifact, stagedRoot, stagedRoot, registry.NewOciClient(a.loadRegistryStore()), !noData); err != nil { - return err - } - stagedScroll, err := domain.NewScroll(stagedRoot) - if err != nil { - return err - } - id, err := coreservices.RuntimeScrollID(name, stagedScroll.Name) - if err != nil { - return err - } - scrollRoot = store.ScrollRoot(id) - dataRoot = scrollRoot - if err := coreservices.MoveMaterializedScroll(stagedRoot, stagedRoot, scrollRoot, dataRoot); err != nil { - return err - } - } - - scroll, err := service.Create(cmd.Context(), name, artifact, scrollRoot, dataRoot, !noStart) - if err != nil { - return err - } - return printJSON(scroll) - }, - } - cmd.Flags().StringVar(&stateDir, "state-dir", "", "Runtime state directory for local materialization (default: ~/.druid/runtime)") - cmd.Flags().StringVar(&scrollRoot, "scroll-root", "", "Daemon-local path containing materialized scroll spec") - cmd.Flags().StringVar(&dataRoot, "data-root", "", "Daemon-local path containing runtime data") - cmd.Flags().BoolVar(&noData, "no-data", false, "Skip scroll data files") - cmd.Flags().BoolVar(&noStart, "no-start", false, "Create the scroll without starting its serve command") - return cmd -} - -func localArtifactExists(artifact string) bool { - _, err := os.Stat(artifact) - return err == nil -} diff --git a/apps/druid-client/adapters/cli/delete.go b/apps/druid-client/adapters/cli/delete.go deleted file mode 100644 index 9420694c..00000000 --- a/apps/druid-client/adapters/cli/delete.go +++ /dev/null @@ -1,22 +0,0 @@ -package cli - -import "github.com/spf13/cobra" - -func (a *App) deleteCmd() *cobra.Command { - return &cobra.Command{ - Use: "delete ", - Short: "Delete a scroll from the daemon", - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - service, err := a.runtimeService() - if err != nil { - return err - } - deleted, err := service.Delete(cmd.Context(), args[0]) - if err != nil { - return 
err - } - return printJSON(deleted) - }, - } -} diff --git a/apps/druid-client/adapters/cli/describe.go b/apps/druid-client/adapters/cli/describe.go deleted file mode 100644 index c3440357..00000000 --- a/apps/druid-client/adapters/cli/describe.go +++ /dev/null @@ -1,22 +0,0 @@ -package cli - -import "github.com/spf13/cobra" - -func (a *App) describeCmd() *cobra.Command { - return &cobra.Command{ - Use: "describe ", - Short: "Describe a scroll from the daemon", - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - service, err := a.runtimeService() - if err != nil { - return err - } - scroll, err := service.Describe(cmd.Context(), args[0]) - if err != nil { - return err - } - return printJSON(scroll) - }, - } -} diff --git a/apps/druid-client/adapters/cli/lifecycle.go b/apps/druid-client/adapters/cli/lifecycle.go deleted file mode 100644 index 2126b50f..00000000 --- a/apps/druid-client/adapters/cli/lifecycle.go +++ /dev/null @@ -1,41 +0,0 @@ -package cli - -import "github.com/spf13/cobra" - -func (a *App) startCmd() *cobra.Command { - return &cobra.Command{ - Use: "start ", - Short: "Start the daemon-managed scroll serve command", - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - service, err := a.runtimeService() - if err != nil { - return err - } - scroll, err := service.Start(cmd.Context(), args[0]) - if err != nil { - return err - } - return printJSON(scroll) - }, - } -} - -func (a *App) stopCmd() *cobra.Command { - return &cobra.Command{ - Use: "stop ", - Short: "Stop daemon-managed runtime workloads for a scroll", - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - service, err := a.runtimeService() - if err != nil { - return err - } - scroll, err := service.Stop(cmd.Context(), args[0]) - if err != nil { - return err - } - return printJSON(scroll) - }, - } -} diff --git a/apps/druid-client/adapters/cli/list.go b/apps/druid-client/adapters/cli/list.go deleted 
file mode 100644 index f5d133f7..00000000 --- a/apps/druid-client/adapters/cli/list.go +++ /dev/null @@ -1,22 +0,0 @@ -package cli - -import "github.com/spf13/cobra" - -func (a *App) listCmd() *cobra.Command { - return &cobra.Command{ - Use: "list", - Short: "List scrolls and status from the daemon", - Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - service, err := a.runtimeService() - if err != nil { - return err - } - scrolls, err := service.List(cmd.Context()) - if err != nil { - return err - } - return printScrolls(scrolls) - }, - } -} diff --git a/apps/druid-client/adapters/cli/login.go b/apps/druid-client/adapters/cli/login.go deleted file mode 100644 index 135b4ba6..00000000 --- a/apps/druid-client/adapters/cli/login.go +++ /dev/null @@ -1,71 +0,0 @@ -package cli - -import ( - "fmt" - - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/core/services/registry" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -func (a *App) loginCmd() *cobra.Command { - var registryHost string - var registryUser string - var registryPassword string - - cmd := &cobra.Command{ - Use: "login", - Short: "Login to OCI registry", - Long: `Add or update registry credentials in the configuration. -Supports multiple registries with path-based credential matching. 
- -Examples: - druid-client login --host registry-1.docker.io -u user -p pass - druid-client login --host artifacts.druid.gg/project1 -u user1 -p pass1 - druid-client login --host artifacts.druid.gg/project2 -u user2 -p pass2`, - RunE: func(cmd *cobra.Command, args []string) error { - - if err := registry.ValidateCredentials(registryHost, registryUser, registryPassword); err != nil { - return fmt.Errorf("login failed: %w", err) - } - - cmd.Println("Login succeeded") - - var registries []domain.RegistryCredential - viper.UnmarshalKey("registries", ®istries) - - newCred := domain.RegistryCredential{ - Host: registryHost, - Username: registryUser, - Password: registryPassword, - } - - found := false - for i := range registries { - if registries[i].Host == registryHost { - registries[i] = newCred - found = true - break - } - } - - if !found { - registries = append(registries, newCred) - } - - viper.Set("registries", registries) - - return viper.WriteConfig() - }, - } - - cmd.Flags().StringVar(®istryHost, "host", "", "OCI registry host (e.g., artifacts.druid.gg/project1)") - cmd.Flags().StringVarP(®istryUser, "user", "u", "", "username") - cmd.Flags().StringVarP(®istryPassword, "password", "p", "", "User password") - - cmd.MarkFlagRequired("host") - cmd.MarkFlagRequired("user") - cmd.MarkFlagRequired("password") - return cmd -} diff --git a/apps/druid-client/adapters/cli/ports.go b/apps/druid-client/adapters/cli/ports.go deleted file mode 100644 index 686cba0e..00000000 --- a/apps/druid-client/adapters/cli/ports.go +++ /dev/null @@ -1,22 +0,0 @@ -package cli - -import "github.com/spf13/cobra" - -func (a *App) portsCmd() *cobra.Command { - return &cobra.Command{ - Use: "ports ", - Short: "Show runtime port status for a scroll", - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - service, err := a.runtimeService() - if err != nil { - return err - } - ports, err := service.Ports(cmd.Context(), args[0]) - if err != nil { - return err - } - 
return printJSON(ports) - }, - } -} diff --git a/apps/druid-client/adapters/cli/pull.go b/apps/druid-client/adapters/cli/pull.go deleted file mode 100644 index ee992bbe..00000000 --- a/apps/druid-client/adapters/cli/pull.go +++ /dev/null @@ -1,36 +0,0 @@ -package cli - -import ( - "github.com/highcard-dev/daemon/internal/core/services/registry" - "github.com/highcard-dev/daemon/internal/utils/logger" - "github.com/spf13/cobra" -) - -func (a *App) pullCmd() *cobra.Command { - var noData bool - cmd := &cobra.Command{ - Use: "pull [dir]", - Short: "Pull a scroll from an OCI registry (tag or digest)", - Args: cobra.RangeArgs(1, 2), - RunE: func(cmd *cobra.Command, args []string) error { - artifact := args[0] - dir := currentWorkingDir() - if len(args) == 2 { - dir = args[1] - } - - registryClient := registry.NewOciClient(a.loadRegistryStore()) - - err := registryClient.PullSelective(dir, artifact, !noData, nil) - if err != nil { - logger.Log().Error("Failed to pull from registry") - return err - } - - logger.Log().Info("Pulled from registry") - return nil - }, - } - cmd.Flags().BoolVar(&noData, "no-data", false, "Skip scroll data files") - return cmd -} diff --git a/apps/druid-client/adapters/cli/push.go b/apps/druid-client/adapters/cli/push.go deleted file mode 100644 index ec7889b1..00000000 --- a/apps/druid-client/adapters/cli/push.go +++ /dev/null @@ -1,129 +0,0 @@ -package cli - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/core/services/registry" - "github.com/highcard-dev/daemon/internal/utils" - "github.com/highcard-dev/daemon/internal/utils/logger" - "github.com/spf13/cobra" - "go.uber.org/zap" -) - -func (a *App) pushCmd() *cobra.Command { - var minRam string - var minCpu string - var minDisk string - var image string - var scrollPorts []string - var packMeta bool - var smart bool - var category string - - cmd := &cobra.Command{ - Use: "push 
[artifact] [dir]", - Short: "Generate OCI Artifacts and push to a remote registry", - Args: cobra.MaximumNArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - credStore := a.loadRegistryStore() - - fullPath := currentWorkingDir() - artifact := "" - switch len(args) { - case 1: - if isScrollDir(args[0]) { - fullPath = args[0] - } else { - artifact = args[0] - } - case 2: - artifact = args[0] - fullPath = args[1] - } - - scroll, err := domain.NewScroll(fullPath) - - if err != nil { - return err - } - - repo := scroll.Name - tag := scroll.AppVersion - - if artifact != "" { - repo, tag = utils.SplitArtifact(artifact) - } - - logger.Log().Info("Pushing "+repo+":"+tag+" to registry", zap.String("path", fullPath)) - - ociClient := registry.NewOciClient(credStore) - - overrides := map[string]string{} - if minRam != "" { - overrides["gg.druid.scroll.minRam"] = minRam - } - if minCpu != "" { - overrides["gg.druid.scroll.minCpu"] = minCpu - } - if minDisk != "" { - overrides["gg.druid.scroll.minDisk"] = minDisk - } - if image != "" { - overrides["gg.druid.scroll.image"] = image - } - if smart { - overrides["gg.druid.scroll.smart"] = "true" - } - if category != "" { - overrides["gg.druid.scroll.category"] = category - } - for _, p := range scrollPorts { - parts := strings.Split(p, "=") - name := parts[0] - port := "0" - if len(parts) == 2 { - port = parts[1] - } - overrides[fmt.Sprintf("gg.druid.scroll.port.%s", name)] = port - } - - _, err = ociClient.Push(fullPath, repo, tag, overrides, packMeta, &scroll.File) - if err != nil { - return err - } - - logger.Log().Info("Pushed "+scroll.Name+" to registry", zap.String("path", fullPath)) - return nil - }, - } - - cmd.AddCommand(a.pushCategoryCmd()) - - cmd.Flags().StringVarP(&minRam, "min-ram", "r", minRam, "Minimum RAM required to run the application. 
(Will be added as a manifest annotation gg.druid.scroll.minRam)") - cmd.Flags().StringVarP(&minCpu, "min-cpu", "c", minCpu, "Minimum CPU required to run the application. (Will be added as a manifest annotation gg.druid.scroll.minCpu)") - cmd.Flags().StringVarP(&minDisk, "min-disk", "d", minDisk, "Minimum Disk required to run the application. (Will be added as a manifest annotation gg.druid.scroll.minDisk)") - cmd.Flags().BoolVarP(&smart, "smart", "s", false, "Indicates, if the scroll is able to run as a smart deployment (Will be added as a manifest annotation gg.druid.scroll.smart)") - cmd.Flags().StringVar(&category, "category", category, "Category of the scroll. (Will be added as a manifest annotation gg.druid.scroll.category)") - - cmd.Flags().StringVarP(&image, "image", "i", image, "Image to use for the scroll. (Will be added as a manifest annotation gg.druid.scroll.image)") - - cmd.Flags().StringSliceVarP(&scrollPorts, "port", "p", scrollPorts, "Ports to expose. Format webserver=80, dns=53/udp or just ftp (Will be added as a manifest annotation gg.druid.scroll.ports.)") - - cmd.Flags().BoolVarP(&packMeta, "pack-meta", "m", packMeta, "Pack the meta folder into the scroll.") - return cmd -} - -func isScrollDir(dir string) bool { - if dir == "" { - return false - } - path := filepath.Join(dir, "scroll.yaml") - if _, err := os.Stat(path); err == nil { - return true - } - return false -} diff --git a/apps/druid-client/adapters/cli/push_category.go b/apps/druid-client/adapters/cli/push_category.go deleted file mode 100644 index 20bb783f..00000000 --- a/apps/druid-client/adapters/cli/push_category.go +++ /dev/null @@ -1,43 +0,0 @@ -package cli - -import ( - "github.com/highcard-dev/daemon/internal/core/services/registry" - "github.com/highcard-dev/daemon/internal/utils/logger" - "github.com/spf13/cobra" - "go.uber.org/zap" -) - -func (a *App) pushCategoryCmd() *cobra.Command { - var pushCategoryNamePattern string - - cmd := &cobra.Command{ - Use: "category", - Short: 
"Push locale markdown files (e.g. de-DE.md) from a scroll directory as separate OCI layers.", - Args: cobra.RangeArgs(2, 3), - RunE: func(cmd *cobra.Command, args []string) error { - credStore := a.loadRegistryStore() - - repo := args[0] - category := args[1] - scrollDir := currentWorkingDir() - if len(args) == 3 { - scrollDir = args[2] - } - - logger.Log().Info("Pushing "+repo+" category to registry", zap.String("scrollDir", scrollDir)) - - ociClient := registry.NewOciClient(credStore) - - _, err := ociClient.PushCategory(scrollDir, repo, category) - - if err != nil { - return err - } - - logger.Log().Info("Pushed " + repo + " category to registry") - return nil - }, - } - cmd.Flags().StringVar(&pushCategoryNamePattern, "match", "", "Regexp matching file basenames to push (default: locale markdown like de-DE.md)") - return cmd -} diff --git a/apps/druid-client/adapters/cli/register.go b/apps/druid-client/adapters/cli/register.go deleted file mode 100644 index 0fffa878..00000000 --- a/apps/druid-client/adapters/cli/register.go +++ /dev/null @@ -1,48 +0,0 @@ -package cli - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/spf13/cobra" -) - -func (a *App) registerCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "register [dir] [name]", - Short: "Register an already checked-out scroll with the daemon", - Args: cobra.MaximumNArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - dir := currentWorkingDir() - name := "" - if len(args) >= 1 { - dir = args[0] - } - if len(args) == 2 { - name = args[1] - } - scrollRoot, err := filepath.Abs(dir) - if err != nil { - return err - } - info, err := os.Stat(filepath.Join(scrollRoot, "scroll.yaml")) - if err != nil { - return fmt.Errorf("registered scroll directory must contain scroll.yaml: %w", err) - } - if info.IsDir() { - return fmt.Errorf("registered scroll directory must contain scroll.yaml file") - } - service, err := a.runtimeService() - if err != nil { - return err - } - scroll, err := 
service.Create(cmd.Context(), name, scrollRoot, scrollRoot, scrollRoot, true) - if err != nil { - return err - } - return printJSON(scroll) - }, - } - return cmd -} diff --git a/apps/druid-client/adapters/cli/root.go b/apps/druid-client/adapters/cli/root.go deleted file mode 100644 index cb80ffad..00000000 --- a/apps/druid-client/adapters/cli/root.go +++ /dev/null @@ -1,94 +0,0 @@ -package cli - -import ( - "os" - - "github.com/highcard-dev/daemon/apps/druid-client/adapters/daemon" - "github.com/highcard-dev/daemon/apps/druid-client/core/services" - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/core/services/registry" - "github.com/highcard-dev/daemon/internal/utils" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -type App struct { - daemonSocket string - configFile string - envPath string -} - -func NewRootCommand() *cobra.Command { - app := &App{} - cmd := &cobra.Command{ - Use: "druid-client", - Short: "Druid runtime daemon client", - Run: func(cmd *cobra.Command, args []string) { - cmd.Usage() - }, - } - cobra.OnInitialize(app.initConfig) - cmd.PersistentFlags().StringVar(&app.daemonSocket, "daemon-socket", utils.DefaultRuntimeSocketPath(), "Runtime daemon Unix socket path") - cmd.PersistentFlags().StringVar(&app.configFile, "config", "", "Path to config file (default: ~/.druid.yaml)") - cmd.PersistentFlags().StringVarP(&app.envPath, "env-file", "e", "./.env", "Path to environment file (.env)") - cmd.AddCommand(app.createCmd()) - cmd.AddCommand(app.registerCmd()) - cmd.AddCommand(app.listCmd()) - cmd.AddCommand(app.describeCmd()) - cmd.AddCommand(app.deleteCmd()) - cmd.AddCommand(app.runCmd()) - cmd.AddCommand(app.startCmd()) - cmd.AddCommand(app.stopCmd()) - cmd.AddCommand(app.portsCmd()) - cmd.AddCommand(app.routingCmd()) - cmd.AddCommand(app.attachCmd()) - cmd.AddCommand(app.pullCmd()) - cmd.AddCommand(app.pushCmd()) - cmd.AddCommand(app.loginCmd()) - return cmd -} - -func (a *App) initConfig() { 
- viper.AutomaticEnv() - if a.configFile != "" { - viper.SetConfigFile(a.configFile) - } else { - home, err := os.UserHomeDir() - cobra.CheckErr(err) - viper.SetConfigType("yaml") - viper.SetConfigName(".druid") - viper.AddConfigPath(home) - } - viper.SafeWriteConfig() - viper.ReadInConfig() -} - -func (a *App) loadRegistryStore() *registry.CredentialStore { - var registries []domain.RegistryCredential - viper.UnmarshalKey("registries", ®istries) - if len(registries) == 0 { - host := viper.GetString("registry.host") - user := viper.GetString("registry.user") - password := viper.GetString("registry.password") - if host != "" { - registries = append(registries, domain.RegistryCredential{Host: host, Username: user, Password: password}) - } - } - return registry.NewCredentialStore(registries) -} - -func (a *App) runtimeService() (*services.RuntimeService, error) { - client, err := daemon.NewOpenAPIClient(a.daemonSocket) - if err != nil { - return nil, err - } - return services.NewRuntimeService(client), nil -} - -func currentWorkingDir() string { - cwd, err := os.Getwd() - if err != nil { - return "." 
- } - return cwd -} diff --git a/apps/druid-client/adapters/cli/root_test.go b/apps/druid-client/adapters/cli/root_test.go deleted file mode 100644 index a43ad782..00000000 --- a/apps/druid-client/adapters/cli/root_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package cli - -import "testing" - -func TestRootCommandExposesOCICommands(t *testing.T) { - root := NewRootCommand() - for _, name := range []string{"pull", "push", "login", "register"} { - cmd, _, err := root.Find([]string{name}) - if err != nil || cmd == nil || cmd.Name() != name { - t.Fatalf("druid-client should expose %q", name) - } - } - cmd, _, err := root.Find([]string{"push", "category"}) - if err != nil || cmd == nil || cmd.Name() != "category" { - t.Fatalf("druid-client should expose push category") - } -} - -func TestRegisterRejectsDirectoryWithoutScrollYAML(t *testing.T) { - cmd := (&App{}).registerCmd() - err := cmd.RunE(cmd, []string{t.TempDir()}) - if err == nil { - t.Fatal("register should reject directory without scroll.yaml") - } -} - -func TestRootCommandIsSocketOnly(t *testing.T) { - root := NewRootCommand() - if flag := root.PersistentFlags().Lookup("daemon-url"); flag != nil { - t.Fatal("druid-client should not expose --daemon-url") - } - if flag := root.PersistentFlags().Lookup("daemon-socket"); flag == nil { - t.Fatal("druid-client should expose --daemon-socket") - } -} - -func TestRootCommandDoesNotExposeCWDFlag(t *testing.T) { - root := NewRootCommand() - if flag := root.PersistentFlags().Lookup("cwd"); flag != nil { - t.Fatal("druid-client should not expose --cwd") - } -} - -func TestCreateAndRegisterDoNotExposeRuntimeFlag(t *testing.T) { - app := &App{} - if flag := app.createCmd().Flags().Lookup("runtime"); flag != nil { - t.Fatal("druid-client create should not expose --runtime") - } - if flag := app.registerCmd().Flags().Lookup("runtime"); flag != nil { - t.Fatal("druid-client register should not expose --runtime") - } -} diff --git a/apps/druid-client/adapters/cli/routing.go 
b/apps/druid-client/adapters/cli/routing.go deleted file mode 100644 index b0e47e0f..00000000 --- a/apps/druid-client/adapters/cli/routing.go +++ /dev/null @@ -1,95 +0,0 @@ -package cli - -import ( - "encoding/json" - "fmt" - "io" - "os" - - "github.com/highcard-dev/daemon/internal/api" - "github.com/spf13/cobra" -) - -func (a *App) routingCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "routing", - Short: "Inspect or apply daemon runtime routing", - } - cmd.AddCommand(a.routingTargetsCmd()) - cmd.AddCommand(a.routingApplyCmd()) - return cmd -} - -func (a *App) routingTargetsCmd() *cobra.Command { - return &cobra.Command{ - Use: "targets ", - Short: "Show backend service targets for a scroll", - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - service, err := a.runtimeService() - if err != nil { - return err - } - targets, err := service.RoutingTargets(cmd.Context(), args[0]) - if err != nil { - return err - } - return printJSON(targets) - }, - } -} - -func (a *App) routingApplyCmd() *cobra.Command { - var file string - cmd := &cobra.Command{ - Use: "apply ", - Short: "Persist assigned public routing for a scroll", - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - if file == "" { - return fmt.Errorf("--file is required") - } - payload, err := readRoutingAssignments(file) - if err != nil { - return err - } - service, err := a.runtimeService() - if err != nil { - return err - } - scroll, err := service.ApplyRouting(cmd.Context(), args[0], payload.Assignments) - if err != nil { - return err - } - return printJSON(scroll) - }, - } - cmd.Flags().StringVarP(&file, "file", "f", "", "JSON file with an assignments array, or '-' for stdin") - return cmd -} - -type routingAssignmentsPayload struct { - Assignments []api.RuntimeRouteAssignment `json:"assignments"` -} - -func readRoutingAssignments(file string) (routingAssignmentsPayload, error) { - var data []byte - var err error - if file == "-" { - 
data, err = io.ReadAll(os.Stdin) - } else { - data, err = os.ReadFile(file) - } - if err != nil { - return routingAssignmentsPayload{}, err - } - var payload routingAssignmentsPayload - if err := json.Unmarshal(data, &payload); err == nil && payload.Assignments != nil { - return payload, nil - } - var assignments []api.RuntimeRouteAssignment - if err := json.Unmarshal(data, &assignments); err != nil { - return routingAssignmentsPayload{}, err - } - return routingAssignmentsPayload{Assignments: assignments}, nil -} diff --git a/apps/druid-client/adapters/cli/run.go b/apps/druid-client/adapters/cli/run.go deleted file mode 100644 index 87a158be..00000000 --- a/apps/druid-client/adapters/cli/run.go +++ /dev/null @@ -1,22 +0,0 @@ -package cli - -import "github.com/spf13/cobra" - -func (a *App) runCmd() *cobra.Command { - return &cobra.Command{ - Use: "run ", - Short: "Run a command on a daemon-managed scroll", - Args: cobra.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - service, err := a.runtimeService() - if err != nil { - return err - } - scroll, err := service.Run(cmd.Context(), args[0], args[1]) - if err != nil { - return err - } - return printJSON(scroll) - }, - } -} diff --git a/apps/druid-client/core/ports/runtime_daemon.go b/apps/druid-client/core/ports/runtime_daemon.go deleted file mode 100644 index 16b5182b..00000000 --- a/apps/druid-client/core/ports/runtime_daemon.go +++ /dev/null @@ -1,24 +0,0 @@ -package ports - -import ( - "context" - - "github.com/highcard-dev/daemon/internal/api" -) - -type RuntimeDaemon interface { - CreateScroll(ctx context.Context, name string, artifact string, scrollRoot string, dataRoot string, start bool) (*api.RuntimeScroll, error) - ListScrolls(ctx context.Context) ([]api.RuntimeScroll, error) - GetScroll(ctx context.Context, id string) (*api.RuntimeScroll, error) - DeleteScroll(ctx context.Context, id string) (*api.DeletedScroll, error) - RunScrollCommand(ctx context.Context, id string, command 
string) (*api.RuntimeScroll, error) - GetScrollPorts(ctx context.Context, id string) ([]api.RuntimePortStatus, error) - StartScroll(ctx context.Context, id string) (*api.RuntimeScroll, error) - StopScroll(ctx context.Context, id string) (*api.RuntimeScroll, error) - GetScrollRoutingTargets(ctx context.Context, id string) ([]api.RuntimeRoutingTarget, error) - ApplyScrollRouting(ctx context.Context, id string, assignments []api.RuntimeRouteAssignment) (*api.RuntimeScroll, error) -} - -type ConsoleAttacher interface { - Attach(ctx context.Context, scroll string, console string) error -} diff --git a/apps/druid-client/core/services/runtime_service.go b/apps/druid-client/core/services/runtime_service.go deleted file mode 100644 index 027e9d6e..00000000 --- a/apps/druid-client/core/services/runtime_service.go +++ /dev/null @@ -1,56 +0,0 @@ -package services - -import ( - "context" - - "github.com/highcard-dev/daemon/apps/druid-client/core/ports" - "github.com/highcard-dev/daemon/internal/api" -) - -type RuntimeService struct { - daemon ports.RuntimeDaemon -} - -func NewRuntimeService(daemon ports.RuntimeDaemon) *RuntimeService { - return &RuntimeService{daemon: daemon} -} - -func (s *RuntimeService) Create(ctx context.Context, name string, artifact string, scrollRoot string, dataRoot string, start bool) (*api.RuntimeScroll, error) { - return s.daemon.CreateScroll(ctx, name, artifact, scrollRoot, dataRoot, start) -} - -func (s *RuntimeService) List(ctx context.Context) ([]api.RuntimeScroll, error) { - return s.daemon.ListScrolls(ctx) -} - -func (s *RuntimeService) Describe(ctx context.Context, id string) (*api.RuntimeScroll, error) { - return s.daemon.GetScroll(ctx, id) -} - -func (s *RuntimeService) Delete(ctx context.Context, id string) (*api.DeletedScroll, error) { - return s.daemon.DeleteScroll(ctx, id) -} - -func (s *RuntimeService) Run(ctx context.Context, id string, command string) (*api.RuntimeScroll, error) { - return s.daemon.RunScrollCommand(ctx, id, command) 
-} - -func (s *RuntimeService) Ports(ctx context.Context, id string) ([]api.RuntimePortStatus, error) { - return s.daemon.GetScrollPorts(ctx, id) -} - -func (s *RuntimeService) Start(ctx context.Context, id string) (*api.RuntimeScroll, error) { - return s.daemon.StartScroll(ctx, id) -} - -func (s *RuntimeService) Stop(ctx context.Context, id string) (*api.RuntimeScroll, error) { - return s.daemon.StopScroll(ctx, id) -} - -func (s *RuntimeService) RoutingTargets(ctx context.Context, id string) ([]api.RuntimeRoutingTarget, error) { - return s.daemon.GetScrollRoutingTargets(ctx, id) -} - -func (s *RuntimeService) ApplyRouting(ctx context.Context, id string, assignments []api.RuntimeRouteAssignment) (*api.RuntimeScroll, error) { - return s.daemon.ApplyScrollRouting(ctx, id, assignments) -} diff --git a/apps/druid-client/main.go b/apps/druid-client/main.go deleted file mode 100644 index 8cb59f72..00000000 --- a/apps/druid-client/main.go +++ /dev/null @@ -1,15 +0,0 @@ -package main - -import ( - "os" - - "github.com/highcard-dev/daemon/apps/druid-client/adapters/cli" - "github.com/highcard-dev/daemon/internal/utils/logger" -) - -func main() { - logger.Log(logger.WithStructuredLogging()) - if err := cli.NewRootCommand().Execute(); err != nil { - os.Exit(1) - } -} diff --git a/apps/druid-coldstarter/adapters/cli/root.go b/apps/druid-coldstarter/adapters/cli/root.go index 3f6919cf..25060749 100644 --- a/apps/druid-coldstarter/adapters/cli/root.go +++ b/apps/druid-coldstarter/adapters/cli/root.go @@ -2,7 +2,7 @@ package cli import ( "context" - "fmt" + "errors" "os" "os/signal" "syscall" @@ -12,28 +12,25 @@ import ( "github.com/spf13/cobra" ) -func NewRootCommand() *cobra.Command { - var scrollRoot string - var statusFile string - var runtimeConfig string +const ( + rootEnv = "DRUID_ROOT" + statusFileEnv = "DRUID_COLDSTARTER_STATUS_FILE" +) +func NewRootCommand() *cobra.Command { cmd := &cobra.Command{ Use: "druid-coldstarter", Short: "Run the standalone Druid coldstart 
gate", RunE: func(cmd *cobra.Command, args []string) error { - if scrollRoot == "" && runtimeConfig == "" { - return fmt.Errorf("--scroll-root or --runtime-config is required") + root := os.Getenv(rootEnv) + if root == "" { + return errors.New(rootEnv + " is required") } ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM, syscall.SIGINT) defer stop() - if runtimeConfig != "" { - return services.NewColdstarterService(filesystem.NewStatusWriter()).RunWithRuntimeConfig(ctx, runtimeConfig, statusFile) - } - return services.NewColdstarterService(filesystem.NewStatusWriter()).Run(ctx, scrollRoot, statusFile) + return services.NewColdstarterService(filesystem.NewStatusWriter()).Run(ctx, root, os.Getenv(statusFileEnv)) }, } - cmd.Flags().StringVar(&scrollRoot, "scroll-root", "", "Mounted scroll root containing scroll.yaml") - cmd.Flags().StringVar(&runtimeConfig, "runtime-config", "", "Generated runtime config path") - cmd.Flags().StringVar(&statusFile, "status-file", "", "Optional status file path, relative to scroll root unless absolute") + cmd.SilenceUsage = true return cmd } diff --git a/apps/druid-coldstarter/adapters/cli/root_test.go b/apps/druid-coldstarter/adapters/cli/root_test.go new file mode 100644 index 00000000..46dbe2b0 --- /dev/null +++ b/apps/druid-coldstarter/adapters/cli/root_test.go @@ -0,0 +1,32 @@ +package cli + +import ( + "strings" + "testing" +) + +func TestRootCommandHasNoRuntimeFlags(t *testing.T) { + cmd := NewRootCommand() + if cmd.Flags().Lookup("root") != nil { + t.Fatal("did not expect root flag") + } + if cmd.Flags().Lookup("status-file") != nil { + t.Fatal("did not expect status-file flag") + } + if cmd.Flags().Lookup("scroll-root") != nil { + t.Fatal("did not expect scroll-root flag") + } + if cmd.Flags().Lookup("runtime-config") != nil { + t.Fatal("did not expect runtime-config flag") + } +} + +func TestRootCommandRequiresRootEnv(t *testing.T) { + t.Setenv(rootEnv, "") + cmd := NewRootCommand() + 
cmd.SetArgs(nil) + err := cmd.Execute() + if err == nil || !strings.Contains(err.Error(), rootEnv+" is required") { + t.Fatalf("expected %s validation error, got %v", rootEnv, err) + } +} diff --git a/apps/druid-coldstarter/adapters/filesystem/status_writer.go b/apps/druid-coldstarter/adapters/filesystem/status_writer.go index 905bfa20..8c4ec9b2 100644 --- a/apps/druid-coldstarter/adapters/filesystem/status_writer.go +++ b/apps/druid-coldstarter/adapters/filesystem/status_writer.go @@ -22,10 +22,10 @@ func NewStatusWriter() *StatusWriter { return &StatusWriter{} } -func (w *StatusWriter) Write(scrollRoot string, statusFile string, port *domain.AugmentedPort) error { +func (w *StatusWriter) Write(root string, statusFile string, port *domain.AugmentedPort) error { path := statusFile if !filepath.IsAbs(path) { - path = filepath.Join(scrollRoot, statusFile) + path = filepath.Join(root, statusFile) } data := status{FinishedAt: time.Now().UTC()} diff --git a/apps/druid-coldstarter/core/ports/status_writer.go b/apps/druid-coldstarter/core/ports/status_writer.go index 8cd47d10..5733a925 100644 --- a/apps/druid-coldstarter/core/ports/status_writer.go +++ b/apps/druid-coldstarter/core/ports/status_writer.go @@ -3,5 +3,5 @@ package ports import "github.com/highcard-dev/daemon/internal/core/domain" type StatusWriter interface { - Write(scrollRoot string, statusFile string, port *domain.AugmentedPort) error + Write(root string, statusFile string, port *domain.AugmentedPort) error } diff --git a/apps/druid-coldstarter/core/services/coldstarter.go b/apps/druid-coldstarter/core/services/coldstarter.go index c248df3a..92f3dc0f 100644 --- a/apps/druid-coldstarter/core/services/coldstarter.go +++ b/apps/druid-coldstarter/core/services/coldstarter.go @@ -2,13 +2,9 @@ package services import ( "context" - "encoding/json" "fmt" - "os" - "path/filepath" "github.com/highcard-dev/daemon/apps/druid-coldstarter/core/ports" - "github.com/highcard-dev/daemon/internal/core/domain" 
"github.com/highcard-dev/daemon/internal/core/services" "github.com/highcard-dev/daemon/internal/utils/logger" "go.uber.org/zap" @@ -22,46 +18,8 @@ func NewColdstarterService(statusWriter ports.StatusWriter) *ColdstarterService return &ColdstarterService{statusWriter: statusWriter} } -func (s *ColdstarterService) RunWithRuntimeConfig(ctx context.Context, runtimeConfigPath string, statusFile string) error { - data, err := os.ReadFile(runtimeConfigPath) - if err != nil { - return fmt.Errorf("failed to read runtime config: %w", err) - } - var config domain.RuntimeConfig - if err := json.Unmarshal(data, &config); err != nil { - return fmt.Errorf("failed to parse runtime config: %w", err) - } - if len(config.Ports) == 0 { - return fmt.Errorf("no ports found in runtime config") - } - - logger.Log().Info("Coldstart runtime config loaded", zap.String("scroll", config.Scroll.ID), zap.Any("ports", config.Ports)) - runtimeRoot := filepath.Dir(runtimeConfigPath) - if filepath.Base(runtimeRoot) == domain.RuntimeConfigDir { - runtimeRoot = filepath.Dir(runtimeRoot) - } - portService := services.NewPortServiceWithScrollFile(&domain.File{Ports: config.Ports}) - coldStarter := services.NewColdStarter(portService, nil, runtimeRoot) - - finish := coldStarter.Start(ctx) - select { - case <-ctx.Done(): - coldStarter.Stop() - return ctx.Err() - case port := <-finish: - coldStarter.Stop() - if statusFile != "" && s.statusWriter != nil { - if err := s.statusWriter.Write(runtimeRoot, statusFile, port); err != nil { - return err - } - } - logger.Log().Info("Coldstarter finished") - return nil - } -} - -func (s *ColdstarterService) Run(ctx context.Context, scrollRoot string, statusFile string) error { - scrollService, err := services.NewScrollService(scrollRoot) +func (s *ColdstarterService) Run(ctx context.Context, root string, statusFile string) error { + scrollService, err := services.NewScrollService(root) if err != nil { return fmt.Errorf("failed to load scroll: %w", err) } @@ -84,7 
+42,7 @@ func (s *ColdstarterService) Run(ctx context.Context, scrollRoot string, statusF case port := <-finish: coldStarter.Stop() if statusFile != "" && s.statusWriter != nil { - if err := s.statusWriter.Write(scrollRoot, statusFile, port); err != nil { + if err := s.statusWriter.Write(root, statusFile, port); err != nil { return err } } diff --git a/apps/druid-coldstarter/core/services/coldstarter_test.go b/apps/druid-coldstarter/core/services/coldstarter_test.go new file mode 100644 index 00000000..b6dc7c0d --- /dev/null +++ b/apps/druid-coldstarter/core/services/coldstarter_test.go @@ -0,0 +1,78 @@ +package services + +import ( + "context" + "net" + "os" + "path/filepath" + "strconv" + "testing" + "time" + + "github.com/highcard-dev/daemon/apps/druid-coldstarter/adapters/filesystem" +) + +func TestColdstarterRunServesGenericPortAndWritesStatus(t *testing.T) { + root := t.TempDir() + port := freeTCPPort(t) + scroll := []byte(`name: test/coldstarter +version: 0.1.0 +ports: + - name: main + protocol: tcp + port: ` + port + ` + sleep_handler: generic +commands: {} +`) + if err := os.WriteFile(filepath.Join(root, "scroll.yaml"), scroll, 0644); err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + errCh := make(chan error, 1) + go func() { + errCh <- NewColdstarterService(filesystem.NewStatusWriter()).Run(ctx, root, ".coldstarter.json") + }() + + conn := dialTCP(t, "127.0.0.1:"+port) + _, _ = conn.Write([]byte("wake")) + _ = conn.Close() + + select { + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + case <-time.After(3 * time.Second): + t.Fatal("coldstarter did not finish") + } + if _, err := os.Stat(filepath.Join(root, ".coldstarter.json")); err != nil { + t.Fatalf("status file missing: %v", err) + } +} + +func freeTCPPort(t *testing.T) string { + t.Helper() + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer listener.Close() + return 
strconv.Itoa(listener.Addr().(*net.TCPAddr).Port) +} + +func dialTCP(t *testing.T, addr string) net.Conn { + t.Helper() + deadline := time.Now().Add(2 * time.Second) + for { + conn, err := net.DialTimeout("tcp", addr, 100*time.Millisecond) + if err == nil { + return conn + } + if time.Now().After(deadline) { + t.Fatalf("dial %s: %v", addr, err) + } + time.Sleep(50 * time.Millisecond) + } +} diff --git a/apps/druid/adapters/cli/app_version.go b/apps/druid/adapters/cli/app_version.go deleted file mode 100644 index f225c992..00000000 --- a/apps/druid/adapters/cli/app_version.go +++ /dev/null @@ -1,129 +0,0 @@ -package cli - -import ( - "fmt" - "os" - - semver "github.com/Masterminds/semver/v3" - "github.com/highcard-dev/daemon/internal/core/services" - "github.com/highcard-dev/daemon/internal/utils/logger" - "github.com/spf13/cobra" -) - -var AppVersionCmd = &cobra.Command{ - Use: "app_version [semver1 string] [lt|gt|eq|ne|le|ge] [semver2 string]", - Short: "Show or compare active app version", - Long: "This command shows the active app version. If a comparison operator and semver string are provided, the command will compare the active app version to the semver string. If the comparison is true, the command will exit with a 0 exit code. 
If the comparison is false, the command will exit with a 1 exit code.", - Args: cobra.MaximumNArgs(3), - RunE: func(cmd *cobra.Command, args []string) error { - logger.Log() - - scrollService, err := services.NewScrollService(currentWorkingDir()) - - if err != nil { - return fmt.Errorf("error creating scroll service: %w", err) - } - - scroll := scrollService.GetFile() - - if err != nil { - return fmt.Errorf("error loading scroll: %w", err) - } - - // If no args, just print the version - if len(args) == 0 { - print(scroll.AppVersion) - return nil - } - - semverAppVersion, err := semver.NewVersion(scroll.AppVersion) - - if err != nil { - return fmt.Errorf("error parsing application version as semver: %w", err) - } - - // If one arg, check if it's equal to the version - if len(args) == 1 { - compareVersionSemverString := args[0] - compareVersionSemver, err := semver.NewVersion(compareVersionSemverString) - if err != nil { - return fmt.Errorf("error parsing application version: %w", err) - } - if semverAppVersion.Equal(compareVersionSemver) { - return nil - } else { - os.Exit(1) - } - } - - var compare string - var semver1, semver2 *semver.Version - - if len(args) == 2 { - compare = args[0] - semver1 = semverAppVersion - semver2, err = semver.NewVersion(args[1]) - if err != nil { - return fmt.Errorf("error parsing application version (argument 2): %w", err) - } - } else { - compare = args[1] - semver1, err = semver.NewVersion(args[0]) - if err != nil { - return fmt.Errorf("error parsing application version (argument 1): %w", err) - } - semver2, err = semver.NewVersion(args[2]) - if err != nil { - return fmt.Errorf("error parsing application version (argument 3): %w", err) - } - - } - - if compare == "eq" { - if semver1.Equal(semver2) { - return nil - } else { - os.Exit(1) - } - } - if compare == "lt" { - if semver1.LessThan(semver2) { - return nil - } else { - os.Exit(1) - } - } - if compare == "gt" { - if semver1.GreaterThan(semver2) { - return nil - } else { - 
os.Exit(1) - } - } - if compare == "ne" { - if !semver1.Equal(semver2) { - return nil - } else { - os.Exit(1) - } - } - if compare == "le" { - if semver1.LessThan(semver2) || semver1.Equal(semver2) { - return nil - } else { - os.Exit(1) - } - } - if compare == "ge" { - if semver1.GreaterThan(semver2) || semver1.Equal(semver2) { - return nil - } else { - os.Exit(1) - } - } - return fmt.Errorf("invalid comparison operator: %s", compare) - }, -} - -func init() { -} diff --git a/apps/druid/adapters/cli/attach.go b/apps/druid/adapters/cli/attach.go new file mode 100644 index 00000000..bfb8ba5c --- /dev/null +++ b/apps/druid/adapters/cli/attach.go @@ -0,0 +1,19 @@ +package cli + +import ( + "github.com/highcard-dev/daemon/apps/druid/adapters/websocketclient" + "github.com/spf13/cobra" +) + +var AttachCommand = &cobra.Command{ + Use: "attach ", + Short: "Attach to a daemon-managed runtime console", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return websocketclient.NewAttacher(daemonSocket).Attach(cmd.Context(), args[0], args[1]) + }, +} + +func init() { + RootCmd.AddCommand(AttachCommand) +} diff --git a/apps/druid/adapters/cli/callback.go b/apps/druid/adapters/cli/callback.go new file mode 100644 index 00000000..cc154488 --- /dev/null +++ b/apps/druid/adapters/cli/callback.go @@ -0,0 +1,33 @@ +package cli + +import ( + "github.com/gofiber/fiber/v2" + appservices "github.com/highcard-dev/daemon/apps/druid/core/services" + "github.com/highcard-dev/daemon/internal/callbackapi" + "github.com/highcard-dev/daemon/internal/core/ports" +) + +type runtimeCallbackHandler struct { + callbacks *appservices.WorkerCallbackManager +} + +func (h runtimeCallbackHandler) CompleteWorker(c *fiber.Ctx, runtimeID callbackapi.Runtime) error { + var result callbackapi.WorkerResult + if err := c.BodyParser(&result); err != nil { + return fiber.NewError(fiber.StatusBadRequest, err.Error()) + } + runtimeResult := ports.RuntimeWorkerResult{} + if 
result.ScrollYaml != nil { + runtimeResult.ScrollYAML = *result.ScrollYaml + } + if result.ArtifactDigest != nil { + runtimeResult.ArtifactDigest = *result.ArtifactDigest + } + if result.Error != nil { + runtimeResult.Error = *result.Error + } + if err := h.callbacks.Complete(string(runtimeID), result.Token, runtimeResult); err != nil { + return fiber.NewError(fiber.StatusUnauthorized, err.Error()) + } + return c.SendStatus(fiber.StatusNoContent) +} diff --git a/apps/druid/adapters/cli/client/create.go b/apps/druid/adapters/cli/client/create.go new file mode 100644 index 00000000..b175a804 --- /dev/null +++ b/apps/druid/adapters/cli/client/create.go @@ -0,0 +1,52 @@ +package client + +import ( + "context" + + "github.com/highcard-dev/daemon/internal/api" + "github.com/spf13/cobra" +) + +var CreateCommand = &cobra.Command{ + Use: "create [name]", + Short: "Create a scroll through the daemon", + Example: ` druid create ./scroll my-scroll -p 8080:http + druid create artifacts.example/app:v1 my-scroll -p 8080:80 + druid create ./scroll my-scroll -p 127.0.0.1:8080:http + druid create ./scroll my-scroll -p 8443:http/https`, + Args: cobra.RangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + artifact := args[0] + name := "" + if len(args) == 2 { + name = args[1] + } + runtimeClient, err := runtimeDaemonClient() + if err != nil { + return err + } + + scroll, err := createScrollWithRouting(cmd.Context(), runtimeClient, artifact, name, registryCredentials(), createPublishes) + if err != nil { + return err + } + return printJSON(scroll) + }, +} + +var createPublishes []string + +func init() { + CreateCommand.Flags().StringArrayVarP(&createPublishes, "publish", "p", nil, "Publish routing as [external-ip:]public-port:target[/protocol]") +} + +func createScrollWithRouting(ctx context.Context, daemon RuntimeDaemon, artifact string, name string, registryCredentials []api.RegistryCredential, publishes []string) (*api.RuntimeScroll, error) { + scroll, err := 
daemon.CreateScroll(ctx, name, artifact, registryCredentials) + if err != nil { + return nil, err + } + if len(publishes) == 0 { + return scroll, nil + } + return applyPublishedRouting(ctx, daemon, scroll.Id, publishes) +} diff --git a/apps/druid/adapters/cli/client/delete.go b/apps/druid/adapters/cli/client/delete.go new file mode 100644 index 00000000..4004194b --- /dev/null +++ b/apps/druid/adapters/cli/client/delete.go @@ -0,0 +1,20 @@ +package client + +import "github.com/spf13/cobra" + +var DeleteCommand = &cobra.Command{ + Use: "delete ", + Short: "Delete a scroll from the daemon", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + daemon, err := runtimeDaemonClient() + if err != nil { + return err + } + deleted, err := daemon.DeleteScroll(cmd.Context(), args[0]) + if err != nil { + return err + } + return printJSON(deleted) + }, +} diff --git a/apps/druid/adapters/cli/client/describe.go b/apps/druid/adapters/cli/client/describe.go new file mode 100644 index 00000000..7738434c --- /dev/null +++ b/apps/druid/adapters/cli/client/describe.go @@ -0,0 +1,20 @@ +package client + +import "github.com/spf13/cobra" + +var DescribeCommand = &cobra.Command{ + Use: "describe ", + Short: "Describe a scroll from the daemon", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + daemon, err := runtimeDaemonClient() + if err != nil { + return err + } + scroll, err := daemon.GetScroll(cmd.Context(), args[0]) + if err != nil { + return err + } + return printJSON(scroll) + }, +} diff --git a/apps/druid/adapters/cli/client/dev.go b/apps/druid/adapters/cli/client/dev.go new file mode 100644 index 00000000..d7a15c38 --- /dev/null +++ b/apps/druid/adapters/cli/client/dev.go @@ -0,0 +1,419 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "mime" + "net/http" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/gofiber/contrib/websocket" + "github.com/gofiber/fiber/v2" + 
"github.com/gofiber/fiber/v2/middleware/adaptor" + "github.com/highcard-dev/daemon/internal/api" + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/ports" + coreservices "github.com/highcard-dev/daemon/internal/core/services" + "github.com/highcard-dev/daemon/internal/devapi" + "github.com/spf13/cobra" + "golang.org/x/net/webdav" +) + +var devWatchPaths []string +var devCommands []string +var devDisable bool +var devStatus bool +var devTrigger bool +var devRoot string +var devListen string +var devRuntimeID string +var devDaemonURL string +var devDaemonToken string +var devOwnerID string +var devAuthJWKSURL string +var devRuntimeJWKSURL string + +var DevCommand = &cobra.Command{ + Use: "dev [name]", + Short: "Control daemon-backed scroll development mode", + Example: ` druid dev my-scroll --watch data/private/dist + druid dev my-scroll --watch data/private/dist --command build + druid dev --root /scroll --listen :8084 --runtime-id my-scroll + druid dev my-scroll --status + druid dev my-scroll --disable`, + Args: cobra.RangeArgs(0, 1), + RunE: func(cmd *cobra.Command, args []string) error { + if devRoot != "" { + return runDevServer() + } + if len(args) != 1 { + return fmt.Errorf("scroll name is required unless --root is set") + } + daemon, err := runtimeDaemonClient() + if err != nil { + return err + } + id := args[0] + modes := 0 + for _, enabled := range []bool{devDisable, devStatus} { + if enabled { + modes++ + } + } + if modes > 1 || devTrigger { + return fmt.Errorf("--status and --disable cannot be combined; use druid run to trigger commands") + } + if devStatus { + status, err := daemon.WatchStatus(cmd.Context(), id) + if err != nil { + return err + } + return printJSON(status) + } + if devDisable { + status, err := daemon.DisableWatch(cmd.Context(), id) + if err != nil { + return err + } + return printJSON(status) + } + if len(devWatchPaths) == 0 { + devWatchPaths = []string{"."} + } + status, err := 
daemon.EnableWatch(cmd.Context(), id, api.DevWatchRequest{ + WatchPaths: devWatchPaths, + HotReloadCommands: devCommands, + }) + if err != nil { + return err + } + return printJSON(status) + }, +} + +func init() { + DevCommand.Flags().StringSliceVar(&devWatchPaths, "watch", nil, "Path to watch, relative to the scroll root; repeatable") + DevCommand.Flags().StringSliceVar(&devCommands, "command", nil, "Scroll command to run on startup and file changes; repeatable") + DevCommand.Flags().BoolVar(&devDisable, "disable", false, "Disable development watch mode") + DevCommand.Flags().BoolVar(&devStatus, "status", false, "Show development watch mode status") + DevCommand.Flags().BoolVar(&devTrigger, "trigger", false, "Deprecated; use druid run ") + DevCommand.Flags().StringVar(&devRoot, "root", "", "Mounted runtime root; when set, run the dev WebDAV/watch server") + DevCommand.Flags().StringVar(&devListen, "listen", ":8084", "Dev server listen address") + DevCommand.Flags().StringVar(&devRuntimeID, "runtime-id", "", "Runtime id") + DevCommand.Flags().StringVar(&devDaemonURL, "daemon-url", "", "Daemon management API URL") + DevCommand.Flags().StringVar(&devDaemonToken, "daemon-token", "", "Daemon management token") + DevCommand.Flags().StringVar(&devOwnerID, "owner-id", "", "Runtime owner id for customer-facing auth") + DevCommand.Flags().StringVar(&devAuthJWKSURL, "auth-jwks-url", "", "JWKS URL for customer JWTs") + DevCommand.Flags().StringVar(&devRuntimeJWKSURL, "runtime-jwks-url", "", "JWKS URL for short-lived runtime tokens") +} + +func runDevServer() error { + if devRuntimeID == "" { + return fmt.Errorf("--runtime-id is required with --root") + } + root, err := filepath.Abs(devRoot) + if err != nil { + return err + } + if len(devWatchPaths) == 0 { + devWatchPaths = []string{"."} + } + if devDaemonURL == "" { + devDaemonURL = os.Getenv("DRUID_DAEMON_URL") + } + if devDaemonToken == "" { + devDaemonToken = os.Getenv("DRUID_INTERNAL_TOKEN") + } + auth := 
devAuth{runtimeID: devRuntimeID, ownerID: devOwnerID} + if devAuthJWKSURL != "" { + auth.user, err = coreservices.NewAuthorizer(devAuthJWKSURL, "") + if err != nil { + return err + } + } + if devRuntimeJWKSURL != "" { + auth.runtime, err = coreservices.NewRuntimeTokenVerifier(devRuntimeJWKSURL) + if err != nil { + return err + } + } + broadcast := domain.NewHub() + go broadcast.Run() + queue := &devTriggerQueue{broadcast: broadcast, commands: append([]string(nil), devCommands...)} + watch := coreservices.NewDevService(queue, devScrollService{commands: devCommands}) + if len(devCommands) > 0 { + if err := watch.SetHotReloadCommands(devCommands); err != nil { + return err + } + } + if err := watch.StartWatching(root, devWatchPaths...); err != nil { + return err + } + + app := newDevApp(root, broadcast, queue, auth) + return app.Listen(devListen) +} + +type devAuth struct { + user ports.AuthorizerServiceInterface + runtime ports.AuthorizerServiceInterface + runtimeID string + ownerID string +} + +func newDevApp(root string, broadcast *domain.BroadcastChannel, queue *devTriggerQueue, authOpt ...devAuth) *fiber.App { + auth := devAuth{} + if len(authOpt) > 0 { + auth = authOpt[0] + } + app := fiber.New(fiber.Config{ + DisableStartupMessage: true, + RequestMethods: append(fiber.DefaultMethods, "PROPFIND", "MKCOL", "MOVE", "COPY"), + }) + server := devServer{root: root, broadcast: broadcast, queue: queue, auth: auth} + app.Use(func(c *fiber.Ctx) error { + c.Set("Access-Control-Allow-Origin", "*") + c.Set("Access-Control-Allow-Methods", "GET,HEAD,PUT,OPTIONS,PROPFIND,MKCOL,MOVE,COPY,DELETE") + c.Set("Access-Control-Allow-Headers", "Origin,Content-Type,Accept,Authorization,Cache-Control,Depth,Destination,Overwrite") + if c.Method() == fiber.MethodOptions && c.Path() != "/api/v1/files" && !strings.HasPrefix(c.Path(), "/webdav/") { + return c.SendStatus(fiber.StatusNoContent) + } + return c.Next() + }) + app.Use(server.authMiddleware) + devapi.RegisterHandlers(app, server) + 
webdavHandler := adaptor.HTTPHandler(&webdav.Handler{ + Prefix: "/webdav", + FileSystem: webdav.Dir(root), + LockSystem: webdav.NewMemLS(), + }) + app.All("/webdav/*", func(c *fiber.Ctx) error { + if err := webdavHandler(c); err != nil { + return err + } + switch c.Method() { + case fiber.MethodPut, "DELETE", "MKCOL", "MOVE", "COPY": + if c.Response().StatusCode() < fiber.StatusBadRequest { + server.queue.Trigger() + } + } + return nil + }) + return app +} + +type devServer struct { + root string + broadcast *domain.BroadcastChannel + queue *devTriggerQueue + auth devAuth +} + +func (s devServer) GetHealth(c *fiber.Ctx) error { return c.SendString("ok") } + +func (s devServer) authMiddleware(c *fiber.Ctx) error { + if c.Path() == "/health" || c.Method() == fiber.MethodOptions { + return c.Next() + } + if s.auth.user == nil && s.auth.runtime == nil { + return c.Next() + } + write := c.Method() == fiber.MethodPut || c.Method() == fiber.MethodPost || c.Method() == fiber.MethodPatch || + c.Method() == fiber.MethodDelete || c.Method() == "MKCOL" || c.Method() == "MOVE" || c.Method() == "COPY" + if s.auth.user != nil { + if ctx, err := s.auth.user.CheckHeader(c); err == nil && ctx != nil { + if s.auth.ownerID != "" && ctx.Subject != s.auth.ownerID { + return fiber.NewError(fiber.StatusForbidden, "runtime owner mismatch") + } + return c.Next() + } else if write { + if err != nil { + return fiber.NewError(fiber.StatusUnauthorized, err.Error()) + } + return fiber.NewError(fiber.StatusUnauthorized, "missing token") + } + } + if !write && s.auth.runtime != nil { + if _, err := s.auth.runtime.CheckQuery(s.auth.runtimeID, c.Query("token")); err == nil { + return c.Next() + } + } + if write || s.auth.runtime != nil { + return fiber.NewError(fiber.StatusUnauthorized, "missing or invalid token") + } + return c.Next() +} + +func (s devServer) GetFile(c *fiber.Ctx, params devapi.GetFileParams) error { + return s.sendFile(c, params.Path) +} + +func (s devServer) HeadFile(c 
*fiber.Ctx, params devapi.HeadFileParams) error { + return s.sendFile(c, params.Path) +} + +func (s devServer) OptionsFile(c *fiber.Ctx, _ devapi.OptionsFileParams) error { + c.Set("DAV", "1") + c.Set("Allow", "OPTIONS, GET, HEAD, PUT") + return c.SendStatus(fiber.StatusNoContent) +} + +func (s devServer) PutFile(c *fiber.Ctx, params devapi.PutFileParams) error { + return s.writeFile(c, params.Path) +} + +func (s devServer) WatchNotifications(c *fiber.Ctx) error { + return websocket.New(func(conn *websocket.Conn) { + defer conn.Close() + sub := s.broadcast.Subscribe() + if sub == nil { + return + } + defer s.broadcast.Unsubscribe(sub) + ping := time.NewTicker(30 * time.Second) + defer ping.Stop() + for { + select { + case msg, ok := <-sub: + if !ok || msg == nil { + return + } + if err := conn.WriteMessage(websocket.TextMessage, *msg); err != nil { + return + } + case <-ping.C: + if err := conn.WriteMessage(websocket.PingMessage, nil); err != nil { + return + } + } + } + })(c) +} + +func (s devServer) sendFile(c *fiber.Ctx, raw string) error { + fullPath, err := devFilePath(s.root, raw) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, err.Error()) + } + data, err := os.ReadFile(fullPath) + if err != nil { + if os.IsNotExist(err) { + return fiber.NewError(fiber.StatusNotFound, err.Error()) + } + return err + } + if contentType := mime.TypeByExtension(filepath.Ext(fullPath)); contentType != "" { + c.Set(fiber.HeaderContentType, contentType) + } + c.Set(fiber.HeaderContentLength, strconv.Itoa(len(data))) + if c.Method() == fiber.MethodHead { + return c.SendStatus(fiber.StatusOK) + } + return c.Send(data) +} + +func (s devServer) writeFile(c *fiber.Ctx, raw string) error { + fullPath, err := devFilePath(s.root, raw) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, err.Error()) + } + if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil { + return err + } + if err := os.WriteFile(fullPath, c.Body(), 0644); err != nil { + 
return err + } + s.queue.Trigger() + return c.SendStatus(fiber.StatusNoContent) +} + +func devFilePath(root string, raw string) (string, error) { + cleaned := filepath.Clean(strings.TrimPrefix(raw, "/")) + if cleaned == "." || cleaned == ".." || strings.HasPrefix(cleaned, "../") { + return "", fmt.Errorf("invalid path %q", raw) + } + full := filepath.Join(root, filepath.FromSlash(cleaned)) + rel, err := filepath.Rel(root, full) + if err != nil || rel == ".." || strings.HasPrefix(rel, "../") { + return "", fmt.Errorf("invalid path %q", raw) + } + return full, nil +} + +type devTriggerQueue struct { + broadcast *domain.BroadcastChannel + commands []string +} + +func (q *devTriggerQueue) AddTempItem(string) error { return q.Trigger() } +func (q *devTriggerQueue) AddTempItemWithWait(command string) error { + return q.runCommand(command) +} +func (q *devTriggerQueue) GetQueue() map[string]domain.ScrollLockStatus { + return nil +} + +func (q *devTriggerQueue) Trigger() error { + for _, command := range q.commands { + q.broadcastEvent("build-started") + err := q.runCommand(command) + q.broadcastEvent("build-ended") + if err != nil { + return err + } + } + return nil +} + +func (q *devTriggerQueue) broadcastEvent(name string) { + data, _ := json.Marshal(map[string]any{"command_key": name, "timestamp": time.Now()}) + q.broadcast.Broadcast(data) +} + +func (q *devTriggerQueue) runCommand(command string) error { + if command == "" { + return nil + } + if devDaemonURL == "" { + return fmt.Errorf("dev daemon URL is required to run %s", command) + } + client, err := api.NewClientWithResponses(devDaemonURL, api.WithRequestEditorFn(func(ctx context.Context, req *http.Request) error { + if devDaemonToken != "" { + req.Header.Set("Authorization", "Bearer "+devDaemonToken) + } + return nil + })) + if err != nil { + return err + } + res, err := client.RunScrollCommandWithResponse(context.Background(), devRuntimeID, command) + if err != nil { + return err + } + if res.StatusCode() < 
200 || res.StatusCode() >= 300 { + return fmt.Errorf("run command %s failed: %s", command, res.Status()) + } + return nil +} + +type devScrollService struct { + commands []string +} + +func (s devScrollService) GetCommand(cmd string) (*domain.CommandInstructionSet, error) { + for _, command := range s.commands { + if command == cmd { + return &domain.CommandInstructionSet{}, nil + } + } + return nil, fmt.Errorf("command %s not found", cmd) +} +func (s devScrollService) GetCurrent() *domain.Scroll { return nil } +func (s devScrollService) GetFile() *domain.File { return &domain.File{} } +func (s devScrollService) GetDir() string { return "" } +func (s devScrollService) GetCwd() string { return "" } diff --git a/apps/druid/adapters/cli/client/dev_test.go b/apps/druid/adapters/cli/client/dev_test.go new file mode 100644 index 00000000..f2951b8b --- /dev/null +++ b/apps/druid/adapters/cli/client/dev_test.go @@ -0,0 +1,211 @@ +package client + +import ( + "fmt" + "io" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/gofiber/fiber/v2" + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/ports" +) + +func TestDevCommandExposesFlags(t *testing.T) { + for _, name := range []string{"watch", "command", "disable", "status", "trigger", "root", "listen"} { + if flag := DevCommand.Flags().Lookup(name); flag == nil { + t.Fatalf("druid dev should expose --%s", name) + } + } +} + +func TestDevServerWebDAVReadWriteAndCallback(t *testing.T) { + root := t.TempDir() + if err := os.MkdirAll(filepath.Join(root, "data/private"), 0755); err != nil { + t.Fatal(err) + } + runCalls := 0 + daemon := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/api/v1/scrolls/smoke/commands/build" { + t.Fatalf("unexpected daemon path %s", r.URL.Path) + } + if r.Header.Get("Authorization") != "Bearer secret" { + t.Fatalf("missing daemon token") + } + 
runCalls++ + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{"id":"smoke"}`)) + })) + defer daemon.Close() + oldURL, oldToken, oldRuntimeID := devDaemonURL, devDaemonToken, devRuntimeID + devDaemonURL = daemon.URL + devDaemonToken = "secret" + devRuntimeID = "smoke" + t.Cleanup(func() { + devDaemonURL, devDaemonToken, devRuntimeID = oldURL, oldToken, oldRuntimeID + }) + + broadcast := domain.NewHub() + go broadcast.Run() + app := newDevApp(root, broadcast, &devTriggerQueue{broadcast: broadcast, commands: []string{"build"}}) + + req := httptest.NewRequest(http.MethodPut, "/webdav/data/private/config.json", strings.NewReader(`{"ok":true}`)) + res, err := app.Test(req) + if err != nil { + t.Fatal(err) + } + if res.StatusCode != http.StatusNoContent && res.StatusCode != http.StatusCreated { + t.Fatalf("PUT status = %d", res.StatusCode) + } + if runCalls != 1 { + t.Fatalf("runCalls = %d, want 1", runCalls) + } + if got, err := os.ReadFile(filepath.Join(root, "data/private/config.json")); err != nil || string(got) != `{"ok":true}` { + t.Fatalf("written file = %q, err = %v", got, err) + } + + res, err = app.Test(httptest.NewRequest(http.MethodGet, "/webdav/data/private/config.json", nil)) + if err != nil { + t.Fatal(err) + } + body, _ := io.ReadAll(res.Body) + _ = res.Body.Close() + if res.StatusCode != http.StatusOK || string(body) != `{"ok":true}` { + t.Fatalf("GET status=%d body=%q", res.StatusCode, body) + } + + res, err = app.Test(httptest.NewRequest(http.MethodHead, "/webdav/data/private/config.json", nil)) + if err != nil { + t.Fatal(err) + } + _ = res.Body.Close() + if res.StatusCode != http.StatusOK || res.Header.Get("Content-Length") == "" { + t.Fatalf("HEAD status=%d content-length=%q", res.StatusCode, res.Header.Get("Content-Length")) + } + + res, err = app.Test(httptest.NewRequest(http.MethodOptions, "/webdav/data/private/config.json", nil)) + if err != nil { + t.Fatal(err) + } + _ = res.Body.Close() + if res.StatusCode != http.StatusNoContent && 
res.StatusCode != http.StatusOK { + t.Fatalf("OPTIONS status=%d", res.StatusCode) + } + if res.Header.Get("DAV") == "" { + t.Fatalf("OPTIONS should be handled by the WebDAV library, DAV header is empty") + } + + req = httptest.NewRequest("MKCOL", "/webdav/data/folder", nil) + res, err = app.Test(req) + if err != nil { + t.Fatal(err) + } + body, _ = io.ReadAll(res.Body) + _ = res.Body.Close() + if res.StatusCode < 200 || res.StatusCode >= 300 { + t.Fatalf("MKCOL status=%d body=%q", res.StatusCode, body) + } + if _, err := os.Stat(filepath.Join(root, "data/folder")); err != nil { + t.Fatalf("MKCOL folder missing: %v", err) + } + + req = httptest.NewRequest("PROPFIND", "/webdav/data/private/config.json", strings.NewReader("")) + req.Header.Set("Depth", "0") + res, err = app.Test(req) + if err != nil { + t.Fatal(err) + } + _ = res.Body.Close() + if res.StatusCode != http.StatusMultiStatus { + t.Fatalf("PROPFIND status=%d", res.StatusCode) + } + + req = httptest.NewRequest(http.MethodPut, "/api/v1/files?path=data/private/api.txt", strings.NewReader("typed")) + res, err = app.Test(req) + if err != nil { + t.Fatal(err) + } + _ = res.Body.Close() + if res.StatusCode != http.StatusNoContent { + t.Fatalf("typed PUT status=%d", res.StatusCode) + } + res, err = app.Test(httptest.NewRequest(http.MethodGet, "/api/v1/files?path=data/private/api.txt", nil)) + if err != nil { + t.Fatal(err) + } + body, _ = io.ReadAll(res.Body) + _ = res.Body.Close() + if res.StatusCode != http.StatusOK || string(body) != "typed" { + t.Fatalf("typed GET status=%d body=%q", res.StatusCode, body) + } +} + +func TestDevFilePathRejectsTraversal(t *testing.T) { + if _, err := devFilePath(t.TempDir(), "../escape"); err == nil { + t.Fatal("expected traversal to be rejected") + } +} + +func TestDevServerFileAuth(t *testing.T) { + root := t.TempDir() + broadcast := domain.NewHub() + go broadcast.Run() + app := newDevApp(root, broadcast, &devTriggerQueue{broadcast: broadcast}, devAuth{ + user: devTestAuth{}, 
+ runtime: devTestAuth{}, + runtimeID: "smoke", + ownerID: "owner", + }) + + res, err := app.Test(httptest.NewRequest(http.MethodPut, "/api/v1/files?path=data/private/api.txt", strings.NewReader("typed"))) + if err != nil { + t.Fatal(err) + } + _ = res.Body.Close() + if res.StatusCode != http.StatusUnauthorized { + t.Fatalf("unauthenticated PUT status=%d", res.StatusCode) + } + + req := httptest.NewRequest(http.MethodPut, "/api/v1/files?path=data/private/api.txt", strings.NewReader("typed")) + req.Header.Set("Authorization", "Bearer user") + res, err = app.Test(req) + if err != nil { + t.Fatal(err) + } + _ = res.Body.Close() + if res.StatusCode != http.StatusNoContent { + t.Fatalf("authenticated PUT status=%d", res.StatusCode) + } + + res, err = app.Test(httptest.NewRequest(http.MethodGet, "/api/v1/files?path=data/private/api.txt&token=runtime", nil)) + if err != nil { + t.Fatal(err) + } + body, _ := io.ReadAll(res.Body) + _ = res.Body.Close() + if res.StatusCode != http.StatusOK || string(body) != "typed" { + t.Fatalf("runtime-token GET status=%d body=%q", res.StatusCode, body) + } +} + +type devTestAuth struct{} + +func (devTestAuth) CheckHeader(c *fiber.Ctx) (*ports.AuthContext, error) { + if c.Get(fiber.HeaderAuthorization) != "Bearer user" { + return nil, fmt.Errorf("missing token") + } + return &ports.AuthContext{Subject: "owner"}, nil +} + +func (devTestAuth) CheckQuery(runtimeID string, token string) (*ports.AuthContext, error) { + if runtimeID != "smoke" || token != "runtime" { + return nil, fmt.Errorf("invalid token") + } + return &ports.AuthContext{Subject: "owner", RuntimeID: runtimeID}, nil +} + +func (devTestAuth) GenerateQueryToken(string, string) string { return "runtime" } diff --git a/apps/druid/adapters/cli/client/list.go b/apps/druid/adapters/cli/client/list.go new file mode 100644 index 00000000..d324acab --- /dev/null +++ b/apps/druid/adapters/cli/client/list.go @@ -0,0 +1,20 @@ +package client + +import "github.com/spf13/cobra" + +var 
ListCommand = &cobra.Command{ + Use: "list", + Short: "List scrolls and status from the daemon", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + daemon, err := runtimeDaemonClient() + if err != nil { + return err + } + scrolls, err := daemon.ListScrolls(cmd.Context()) + if err != nil { + return err + } + return printScrolls(scrolls) + }, +} diff --git a/apps/druid-client/adapters/cli/output.go b/apps/druid/adapters/cli/client/output.go similarity index 97% rename from apps/druid-client/adapters/cli/output.go rename to apps/druid/adapters/cli/client/output.go index 38029f99..25fc98b8 100644 --- a/apps/druid-client/adapters/cli/output.go +++ b/apps/druid/adapters/cli/client/output.go @@ -1,4 +1,4 @@ -package cli +package client import ( "encoding/json" diff --git a/apps/druid/adapters/cli/client/ports.go b/apps/druid/adapters/cli/client/ports.go new file mode 100644 index 00000000..6a962ef0 --- /dev/null +++ b/apps/druid/adapters/cli/client/ports.go @@ -0,0 +1,20 @@ +package client + +import "github.com/spf13/cobra" + +var PortsCommand = &cobra.Command{ + Use: "ports ", + Short: "Show runtime port status for a scroll", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + daemon, err := runtimeDaemonClient() + if err != nil { + return err + } + ports, err := daemon.GetScrollPorts(cmd.Context(), args[0]) + if err != nil { + return err + } + return printJSON(ports) + }, +} diff --git a/apps/druid/adapters/cli/client/register.go b/apps/druid/adapters/cli/client/register.go new file mode 100644 index 00000000..e411ebb1 --- /dev/null +++ b/apps/druid/adapters/cli/client/register.go @@ -0,0 +1,76 @@ +package client + +import ( + "context" + "fmt" + + "github.com/highcard-dev/daemon/internal/api" + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/spf13/cobra" +) + +type RuntimeDaemon interface { + CreateScroll(ctx context.Context, name string, artifact string, registryCredentials 
[]api.RegistryCredential) (*api.RuntimeScroll, error) + ListScrolls(ctx context.Context) ([]api.RuntimeScroll, error) + GetScroll(ctx context.Context, id string) (*api.RuntimeScroll, error) + DeleteScroll(ctx context.Context, id string) (*api.DeletedScroll, error) + RunScrollCommand(ctx context.Context, id string, command string) (*api.RuntimeScroll, error) + GetScrollPorts(ctx context.Context, id string) ([]api.RuntimePortStatus, error) + StartScroll(ctx context.Context, id string) (*api.RuntimeScroll, error) + StopScroll(ctx context.Context, id string) (*api.RuntimeScroll, error) + GetScrollRoutingTargets(ctx context.Context, id string) ([]api.RuntimeRoutingTarget, error) + ApplyScrollRouting(ctx context.Context, id string, assignments []api.RuntimeRouteAssignment) (*api.RuntimeScroll, error) + EnableWatch(ctx context.Context, id string, request api.DevWatchRequest) (*api.DevWatchResponse, error) + DisableWatch(ctx context.Context, id string) (*api.DevWatchResponse, error) + WatchStatus(ctx context.Context, id string) (*api.DevWatchStatus, error) +} + +type Config struct { + Daemon func() (RuntimeDaemon, error) + RegistryCredentials func() []api.RegistryCredential +} + +var config Config + +func Register(root *cobra.Command, cfg Config) { + config = cfg + RoutingCommand.AddCommand(RoutingTargetsCommand, RoutingApplyCommand) + root.AddCommand( + CreateCommand, + DeleteCommand, + DescribeCommand, + DevCommand, + ListCommand, + PortsCommand, + RunCommand, + StartCommand, + StopCommand, + RoutingCommand, + ) +} + +func RegistryCredentials(in []domain.RegistryCredential) []api.RegistryCredential { + out := make([]api.RegistryCredential, 0, len(in)) + for _, credential := range in { + out = append(out, api.RegistryCredential{ + Host: credential.Host, + Username: credential.Username, + Password: credential.Password, + }) + } + return out +} + +func runtimeDaemonClient() (RuntimeDaemon, error) { + if config.Daemon == nil { + return nil, fmt.Errorf("client daemon is not 
configured") + } + return config.Daemon() +} + +func registryCredentials() []api.RegistryCredential { + if config.RegistryCredentials == nil { + return nil + } + return config.RegistryCredentials() +} diff --git a/apps/druid/adapters/cli/client/routing.go b/apps/druid/adapters/cli/client/routing.go new file mode 100644 index 00000000..e06c4dca --- /dev/null +++ b/apps/druid/adapters/cli/client/routing.go @@ -0,0 +1,8 @@ +package client + +import "github.com/spf13/cobra" + +var RoutingCommand = &cobra.Command{ + Use: "routing", + Short: "Inspect or apply daemon runtime routing", +} diff --git a/apps/druid/adapters/cli/client/routing_apply.go b/apps/druid/adapters/cli/client/routing_apply.go new file mode 100644 index 00000000..36a69990 --- /dev/null +++ b/apps/druid/adapters/cli/client/routing_apply.go @@ -0,0 +1,100 @@ +package client + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "os" + + "github.com/highcard-dev/daemon/internal/api" + routingutil "github.com/highcard-dev/daemon/internal/routing" + "github.com/spf13/cobra" +) + +var routingApplyFile string +var routingApplyPublishes []string + +var RoutingApplyCommand = &cobra.Command{ + Use: "apply ", + Short: "Persist assigned public routing for a scroll", + Example: ` druid routing apply my-scroll -p 8080:http + druid routing apply my-scroll -p 8080:80 + druid routing apply my-scroll -p 127.0.0.1:8080:http + druid routing apply my-scroll -p 8443:http/https + druid routing apply my-scroll --file routing.json + cat routing.json | druid routing apply my-scroll --file -`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + daemon, err := runtimeDaemonClient() + if err != nil { + return err + } + assignments, err := routingAssignmentsForApply(cmd.Context(), daemon, args[0], routingApplyFile, routingApplyPublishes) + if err != nil { + return err + } + scroll, err := daemon.ApplyScrollRouting(cmd.Context(), args[0], assignments) + if err != nil { + return err + } 
+ return printJSON(scroll) + }, +} + +func init() { + RoutingApplyCommand.Flags().StringVarP(&routingApplyFile, "file", "f", "", "JSON file with an assignments array, or '-' for stdin") + RoutingApplyCommand.Flags().StringArrayVarP(&routingApplyPublishes, "publish", "p", nil, "Publish routing as [external-ip:]public-port:target[/protocol]") +} + +func routingAssignmentsForApply(ctx context.Context, daemon RuntimeDaemon, id string, file string, publishes []string) ([]api.RuntimeRouteAssignment, error) { + if file != "" && len(publishes) > 0 { + return nil, fmt.Errorf("--file and --publish cannot be used together") + } + if len(publishes) > 0 { + targets, err := daemon.GetScrollRoutingTargets(ctx, id) + if err != nil { + return nil, err + } + return routingutil.AssignmentsFromPublishes(publishes, targets, id) + } + if file == "" { + return nil, fmt.Errorf("--file or --publish is required") + } + data, err := readRoutingAssignmentsFile(file) + if err != nil { + return nil, err + } + return parseRoutingAssignments(data) +} + +func readRoutingAssignmentsFile(file string) ([]byte, error) { + if file == "-" { + data, err := io.ReadAll(os.Stdin) + if err != nil { + return nil, fmt.Errorf("failed to read routing assignments from stdin: %w", err) + } + return data, nil + } + data, err := os.ReadFile(file) + if err != nil { + return nil, fmt.Errorf("failed to read routing assignments file: %w", err) + } + return data, nil +} + +func parseRoutingAssignments(data []byte) ([]api.RuntimeRouteAssignment, error) { + if bytes.HasPrefix(bytes.TrimSpace(data), []byte("{")) { + var request api.ApplyRoutingRequest + if err := json.Unmarshal(data, &request); err != nil { + return nil, fmt.Errorf("failed to unmarshal routing assignments: %w", err) + } + return request.Assignments, nil + } + var assignments []api.RuntimeRouteAssignment + if err := json.Unmarshal(data, &assignments); err != nil { + return nil, fmt.Errorf("failed to unmarshal routing assignments: %w", err) + } + return 
assignments, nil +} diff --git a/apps/druid/adapters/cli/client/routing_publish.go b/apps/druid/adapters/cli/client/routing_publish.go new file mode 100644 index 00000000..09ee46f7 --- /dev/null +++ b/apps/druid/adapters/cli/client/routing_publish.go @@ -0,0 +1,20 @@ +package client + +import ( + "context" + + "github.com/highcard-dev/daemon/internal/api" + routingutil "github.com/highcard-dev/daemon/internal/routing" +) + +func applyPublishedRouting(ctx context.Context, daemon RuntimeDaemon, id string, publishes []string) (*api.RuntimeScroll, error) { + targets, err := daemon.GetScrollRoutingTargets(ctx, id) + if err != nil { + return nil, err + } + assignments, err := routingutil.AssignmentsFromPublishes(publishes, targets, id) + if err != nil { + return nil, err + } + return daemon.ApplyScrollRouting(ctx, id, assignments) +} diff --git a/apps/druid/adapters/cli/client/routing_publish_test.go b/apps/druid/adapters/cli/client/routing_publish_test.go new file mode 100644 index 00000000..a4380527 --- /dev/null +++ b/apps/druid/adapters/cli/client/routing_publish_test.go @@ -0,0 +1,197 @@ +package client + +import ( + "context" + "os" + "strings" + "testing" + + "github.com/highcard-dev/daemon/internal/api" +) + +func TestPublishFlagsAreExposed(t *testing.T) { + if flag := CreateCommand.Flags().Lookup("publish"); flag == nil || flag.Shorthand != "p" { + t.Fatal("druid create should expose -p/--publish") + } + if flag := RoutingApplyCommand.Flags().Lookup("publish"); flag == nil || flag.Shorthand != "p" { + t.Fatal("druid routing apply should expose -p/--publish") + } +} + +func TestCreateWithPublishAppliesRoutingWithoutStarting(t *testing.T) { + daemon := &fakeRoutingDaemon{ + targets: []api.RuntimeRoutingTarget{{Name: "web-http", PortName: "http", Port: 80, Protocol: "http"}}, + } + + scroll, err := createScrollWithRouting(context.Background(), daemon, "artifact", "scroll-a", nil, []string{"8080:http"}) + if err != nil { + t.Fatal(err) + } + if scroll.Id != 
"scroll-a" { + t.Fatalf("scroll id = %s, want scroll-a", scroll.Id) + } + if daemon.createCalls != 1 || daemon.targetCalls != 1 || daemon.applyCalls != 1 { + t.Fatalf("calls create=%d targets=%d apply=%d", daemon.createCalls, daemon.targetCalls, daemon.applyCalls) + } + if daemon.startCalls != 0 { + t.Fatalf("start calls = %d, want 0", daemon.startCalls) + } + assertAssignment(t, daemon.applied[0], "web-http", "http", "127.0.0.1", 8080, "localhost", "http", "http://localhost:8080") +} + +func TestCreateWithoutPublishSkipsRouting(t *testing.T) { + daemon := &fakeRoutingDaemon{} + + if _, err := createScrollWithRouting(context.Background(), daemon, "artifact", "scroll-a", nil, nil); err != nil { + t.Fatal(err) + } + if daemon.createCalls != 1 || daemon.targetCalls != 0 || daemon.applyCalls != 0 { + t.Fatalf("calls create=%d targets=%d apply=%d", daemon.createCalls, daemon.targetCalls, daemon.applyCalls) + } +} + +func TestRoutingAssignmentsForApply(t *testing.T) { + daemon := &fakeRoutingDaemon{ + targets: []api.RuntimeRoutingTarget{{Name: "web-http", PortName: "http", Port: 80, Protocol: "http"}}, + } + + assignments, err := routingAssignmentsForApply(context.Background(), daemon, "scroll-a", "", []string{"8080:http"}) + if err != nil { + t.Fatal(err) + } + assertAssignment(t, assignments[0], "web-http", "http", "127.0.0.1", 8080, "localhost", "http", "http://localhost:8080") + + _, err = routingAssignmentsForApply(context.Background(), daemon, "scroll-a", "routes.json", []string{"8080:http"}) + if err == nil || !strings.Contains(err.Error(), "cannot be used together") { + t.Fatalf("error = %v, want mutual exclusion", err) + } +} + +func TestParseRoutingAssignmentsAcceptsEnvelopeAndArray(t *testing.T) { + for _, data := range [][]byte{ + []byte(`{"assignments":[{"name":"web-http","port_name":"http","public_port":8080}]}`), + []byte(`[{"name":"web-http","port_name":"http","public_port":8080}]`), + } { + assignments, err := parseRoutingAssignments(data) + if err != 
nil { + t.Fatal(err) + } + if len(assignments) != 1 || value(assignments[0].Name) != "web-http" { + t.Fatalf("assignments = %#v", assignments) + } + } +} + +func TestReadRoutingAssignmentsFileReadsStdin(t *testing.T) { + oldStdin := os.Stdin + read, write, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + os.Stdin = read + t.Cleanup(func() { + os.Stdin = oldStdin + _ = read.Close() + }) + if _, err := write.WriteString(`[{"name":"web-http"}]`); err != nil { + t.Fatal(err) + } + if err := write.Close(); err != nil { + t.Fatal(err) + } + data, err := readRoutingAssignmentsFile("-") + if err != nil { + t.Fatal(err) + } + if string(data) != `[{"name":"web-http"}]` { + t.Fatalf("stdin data = %q", string(data)) + } +} + +func assertAssignment(t *testing.T, assignment api.RuntimeRouteAssignment, name string, portName string, externalIP string, publicPort int, host string, protocol string, url string) { + t.Helper() + if value(assignment.Name) != name || value(assignment.PortName) != portName || value(assignment.ExternalIp) != externalIP || intValue(assignment.PublicPort) != publicPort || value(assignment.Host) != host || value(assignment.Protocol) != protocol || value(assignment.Url) != url { + t.Fatalf("assignment = %#v", assignment) + } +} + +func value(in *string) string { + if in == nil { + return "" + } + return *in +} + +func intValue(in *int) int { + if in == nil { + return 0 + } + return *in +} + +type fakeRoutingDaemon struct { + targets []api.RuntimeRoutingTarget + applied []api.RuntimeRouteAssignment + createCalls int + targetCalls int + applyCalls int + startCalls int +} + +func (f *fakeRoutingDaemon) CreateScroll(ctx context.Context, name string, artifact string, registryCredentials []api.RegistryCredential) (*api.RuntimeScroll, error) { + f.createCalls++ + return &api.RuntimeScroll{Id: name, Artifact: artifact, Root: "/root", ScrollName: name, Status: api.Created}, nil +} + +func (f *fakeRoutingDaemon) ListScrolls(ctx context.Context) 
([]api.RuntimeScroll, error) { + return nil, nil +} + +func (f *fakeRoutingDaemon) GetScroll(ctx context.Context, id string) (*api.RuntimeScroll, error) { + return &api.RuntimeScroll{Id: id, Status: api.Created}, nil +} + +func (f *fakeRoutingDaemon) DeleteScroll(ctx context.Context, id string) (*api.DeletedScroll, error) { + return nil, nil +} + +func (f *fakeRoutingDaemon) RunScrollCommand(ctx context.Context, id string, command string) (*api.RuntimeScroll, error) { + return nil, nil +} + +func (f *fakeRoutingDaemon) GetScrollPorts(ctx context.Context, id string) ([]api.RuntimePortStatus, error) { + return nil, nil +} + +func (f *fakeRoutingDaemon) StartScroll(ctx context.Context, id string) (*api.RuntimeScroll, error) { + f.startCalls++ + return nil, nil +} + +func (f *fakeRoutingDaemon) StopScroll(ctx context.Context, id string) (*api.RuntimeScroll, error) { + return nil, nil +} + +func (f *fakeRoutingDaemon) GetScrollRoutingTargets(ctx context.Context, id string) ([]api.RuntimeRoutingTarget, error) { + f.targetCalls++ + return f.targets, nil +} + +func (f *fakeRoutingDaemon) ApplyScrollRouting(ctx context.Context, id string, assignments []api.RuntimeRouteAssignment) (*api.RuntimeScroll, error) { + f.applyCalls++ + f.applied = assignments + routing := append([]api.RuntimeRouteAssignment(nil), assignments...) 
+ return &api.RuntimeScroll{Id: id, Status: api.Created, Routing: &routing}, nil +} + +func (f *fakeRoutingDaemon) EnableWatch(ctx context.Context, id string, request api.DevWatchRequest) (*api.DevWatchResponse, error) { + return &api.DevWatchResponse{Status: "enabled", Enabled: true, WatchedPaths: request.WatchPaths}, nil +} + +func (f *fakeRoutingDaemon) DisableWatch(ctx context.Context, id string) (*api.DevWatchResponse, error) { + return &api.DevWatchResponse{Status: "disabled", Enabled: false}, nil +} + +func (f *fakeRoutingDaemon) WatchStatus(ctx context.Context, id string) (*api.DevWatchStatus, error) { + return &api.DevWatchStatus{Enabled: false, WatchedPaths: []string{}}, nil +} diff --git a/apps/druid/adapters/cli/client/routing_targets.go b/apps/druid/adapters/cli/client/routing_targets.go new file mode 100644 index 00000000..fc420c65 --- /dev/null +++ b/apps/druid/adapters/cli/client/routing_targets.go @@ -0,0 +1,20 @@ +package client + +import "github.com/spf13/cobra" + +var RoutingTargetsCommand = &cobra.Command{ + Use: "targets ", + Short: "Show backend service targets for a scroll", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + daemon, err := runtimeDaemonClient() + if err != nil { + return err + } + targets, err := daemon.GetScrollRoutingTargets(cmd.Context(), args[0]) + if err != nil { + return err + } + return printJSON(targets) + }, +} diff --git a/apps/druid/adapters/cli/client/run.go b/apps/druid/adapters/cli/client/run.go new file mode 100644 index 00000000..cb4237a5 --- /dev/null +++ b/apps/druid/adapters/cli/client/run.go @@ -0,0 +1,20 @@ +package client + +import "github.com/spf13/cobra" + +var RunCommand = &cobra.Command{ + Use: "run ", + Short: "Run a command on a daemon-managed scroll", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + daemon, err := runtimeDaemonClient() + if err != nil { + return err + } + scroll, err := daemon.RunScrollCommand(cmd.Context(), 
args[0], args[1]) + if err != nil { + return err + } + return printJSON(scroll) + }, +} diff --git a/apps/druid/adapters/cli/client/start.go b/apps/druid/adapters/cli/client/start.go new file mode 100644 index 00000000..5057e516 --- /dev/null +++ b/apps/druid/adapters/cli/client/start.go @@ -0,0 +1,20 @@ +package client + +import "github.com/spf13/cobra" + +var StartCommand = &cobra.Command{ + Use: "start ", + Short: "Start the daemon-managed scroll serve command", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + daemon, err := runtimeDaemonClient() + if err != nil { + return err + } + scroll, err := daemon.StartScroll(cmd.Context(), args[0]) + if err != nil { + return err + } + return printJSON(scroll) + }, +} diff --git a/apps/druid/adapters/cli/client/stop.go b/apps/druid/adapters/cli/client/stop.go new file mode 100644 index 00000000..417a8d6b --- /dev/null +++ b/apps/druid/adapters/cli/client/stop.go @@ -0,0 +1,20 @@ +package client + +import "github.com/spf13/cobra" + +var StopCommand = &cobra.Command{ + Use: "stop ", + Short: "Stop daemon-managed runtime workloads for a scroll", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + daemon, err := runtimeDaemonClient() + if err != nil { + return err + } + scroll, err := daemon.StopScroll(cmd.Context(), args[0]) + if err != nil { + return err + } + return printJSON(scroll) + }, +} diff --git a/apps/druid/adapters/cli/daemon.go b/apps/druid/adapters/cli/daemon.go new file mode 100644 index 00000000..fc7b6936 --- /dev/null +++ b/apps/druid/adapters/cli/daemon.go @@ -0,0 +1,210 @@ +package cli + +import ( + "net" + "os" + "path/filepath" + "strings" + + "github.com/gofiber/fiber/v2" + runtimehandlers "github.com/highcard-dev/daemon/apps/druid/adapters/http/handlers" + appservices "github.com/highcard-dev/daemon/apps/druid/core/services" + "github.com/highcard-dev/daemon/internal/callbackapi" + 
"github.com/highcard-dev/daemon/internal/core/services" + runtimebackend "github.com/highcard-dev/daemon/internal/runtime" + runtimedocker "github.com/highcard-dev/daemon/internal/runtime/docker" + runtimekubernetes "github.com/highcard-dev/daemon/internal/runtime/kubernetes" + "github.com/highcard-dev/daemon/internal/utils" + "github.com/highcard-dev/daemon/internal/utils/logger" + "github.com/spf13/cobra" + "go.uber.org/zap" +) + +var runtimeSocket string +var k8sNamespace string +var k8sStorageClass string +var k8sPullImage string +var k8sRegistrySecret string +var hubbleRelayAddr string +var k8sKubeconfig string +var runtimeListen string +var runtimePublicListen string +var runtimeInternalToken string +var runtimeWorkerCallbackListen string +var runtimeWorkerCallbackURL string +var runtimeWorkerDaemonURL string +var runtimeAuthJWKSURL string +var runtimePublicJWKSURL string +var dockerWorkerImage string +var dockerStorage string +var dockerBindRoot string +var dockerVolumePrefix string + +var DaemonCommand = &cobra.Command{ + Use: "daemon", + Aliases: []string{"serve"}, + Short: "Run the multi-scroll runtime daemon", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runRuntimeDaemon() + }, +} + +func init() { + RootCmd.AddCommand(DaemonCommand) + DaemonCommand.Flags().StringVar(&runtimeSocket, "socket", utils.DefaultRuntimeSocketPath(), "Runtime daemon Unix socket path") + DaemonCommand.Flags().StringVar(&runtimeListen, "listen", "", "Optional management HTTP listen address, for example :8081") + DaemonCommand.Flags().StringVar(&runtimePublicListen, "public-listen", "", "Optional public dashboard HTTP listen address, for example :8082") + DaemonCommand.Flags().StringVar(&runtimeInternalToken, "internal-token", "", "Optional bearer token required for management HTTP API requests") + DaemonCommand.Flags().StringVar(&runtimeWorkerCallbackListen, "worker-callback-listen", "", "Optional internal worker callback listen address, 
for example :8083") + DaemonCommand.Flags().StringVar(&runtimeWorkerCallbackURL, "worker-callback-url", "", "URL workers use to call back to this daemon") + DaemonCommand.Flags().StringVar(&runtimeWorkerDaemonURL, "worker-daemon-url", "", "URL dev workers use for daemon management API calls") + DaemonCommand.Flags().StringVar(&runtimeAuthJWKSURL, "auth-jwks-url", "", "JWKS URL used to validate customer JWTs") + DaemonCommand.Flags().StringVar(&runtimePublicJWKSURL, "public-jwks-url", "", "Public JWKS URL workers use to validate daemon runtime tokens") + DaemonCommand.Flags().StringVar(&dockerWorkerImage, "docker-worker-image", "", "Docker image used for sibling worker containers (default: DRUID_DOCKER_WORKER_IMAGE)") + DaemonCommand.Flags().StringVar(&dockerStorage, "docker-storage", "", "Docker runtime storage mode: volume or bind (default: DRUID_DOCKER_STORAGE or volume)") + DaemonCommand.Flags().StringVar(&dockerBindRoot, "docker-bind-root", "", "Host root for Docker bind storage (default: DRUID_DOCKER_BIND_ROOT)") + DaemonCommand.Flags().StringVar(&dockerVolumePrefix, "docker-volume-prefix", "", "Docker volume name prefix (default: DRUID_DOCKER_VOLUME_PREFIX or druid)") + DaemonCommand.Flags().StringVar(&runtimeStateDir, "state-dir", "", "Runtime state directory (default: ~/.druid/runtime)") + DaemonCommand.Flags().StringVar(&runtimeBackendName, "runtime", "docker", "Default runtime backend. 
Valid values: docker, kubernetes") + DaemonCommand.Flags().StringVar(&k8sNamespace, "k8s-namespace", "", "Kubernetes namespace for runtime resources (default: service account namespace or DRUID_K8S_NAMESPACE)") + DaemonCommand.Flags().StringVar(&k8sStorageClass, "k8s-storage-class", "", "Kubernetes storage class for runtime PVCs (default: DRUID_K8S_STORAGE_CLASS)") + DaemonCommand.Flags().StringVar(&k8sPullImage, "k8s-pull-image", "", "Kubernetes image used for OCI pull materialization Jobs (default: DRUID_K8S_PULL_IMAGE)") + DaemonCommand.Flags().StringVar(&k8sRegistrySecret, "k8s-registry-secret", "", "Kubernetes imagePullSecret used by runtime Jobs (default: DRUID_K8S_REGISTRY_SECRET)") + DaemonCommand.Flags().StringVar(&k8sKubeconfig, "k8s-kubeconfig", "", "Kubernetes kubeconfig path for out-of-cluster runtime access (default: DRUID_K8S_KUBECONFIG, KUBECONFIG, or ~/.kube/config)") + DaemonCommand.Flags().StringVar(&hubbleRelayAddr, "hubble-relay-addr", "", "Hubble Relay gRPC address for Kubernetes port traffic (default: DRUID_HUBBLE_RELAY_ADDR or hubble-relay.kube-system.svc.cluster.local:80)") +} + +func runRuntimeDaemon() error { + kubernetesConfig := runtimekubernetes.Config{ + Namespace: k8sNamespace, + StorageClass: k8sStorageClass, + PullImage: k8sPullImage, + RegistrySecret: k8sRegistrySecret, + HubbleRelayAddr: hubbleRelayAddr, + Kubeconfig: k8sKubeconfig, + } + dockerConfig := runtimedocker.Config{WorkerImage: dockerWorkerImage, Storage: dockerStorage, BindRoot: dockerBindRoot, VolumePrefix: dockerVolumePrefix} + logManager := services.NewLogManager() + consoleService := services.NewConsoleManager(logManager) + runtime, err := runtimebackend.NewRuntime(runtimeBackendName, consoleService, runtimeStateDir, runtimebackend.WithKubernetesConfig(kubernetesConfig), runtimebackend.WithDockerConfig(dockerConfig)) + if err != nil { + return err + } + manager := services.NewRuntimeScrollManager(runtime.Store) + supervisor := 
appservices.NewRuntimeSupervisor(runtime.Store, manager, runtime.Backend) + callbacks := appservices.NewWorkerCallbackManager() + if runtimeWorkerCallbackURL == "" { + runtimeWorkerCallbackURL = os.Getenv("DRUID_WORKER_CALLBACK_URL") + } + supervisor.SetWorkerCallbacks(callbacks, runtimeWorkerCallbackURL) + if runtimeWorkerDaemonURL == "" { + runtimeWorkerDaemonURL = os.Getenv("DRUID_WORKER_DAEMON_URL") + } + if runtimeAuthJWKSURL == "" { + runtimeAuthJWKSURL = os.Getenv("DRUID_AUTH_JWKS_URL") + } + if runtimePublicJWKSURL == "" { + runtimePublicJWKSURL = os.Getenv("DRUID_PUBLIC_JWKS_URL") + } + if runtimeInternalToken == "" { + runtimeInternalToken = os.Getenv("DRUID_INTERNAL_TOKEN") + } + supervisor.SetDevWorkerConfig(runtimeWorkerDaemonURL, runtimeInternalToken, runtimeAuthJWKSURL, runtimePublicJWKSURL) + if err := supervisor.Start(); err != nil { + return err + } + + authorizer, err := services.NewAuthorizer(runtimeAuthJWKSURL, "") + if err != nil { + return err + } + scrollHandler := runtimehandlers.NewScrollHandler(supervisor, consoleService, logManager, authorizer) + websocketHandler := runtimehandlers.NewWebsocketHandler(consoleService) + websocketHandler.SetScrollHandler(scrollHandler) + websocketHandler.SetAuthorizer(authorizer) + handlers := runtimehandlers.RouteHandlers{ + Server: runtimehandlers.NewRuntimeServer( + runtimehandlers.NewHealthHandler(), + scrollHandler, + ), + Websocket: websocketHandler, + } + + managementApp := fiber.New(fiber.Config{DisableStartupMessage: true}) + if runtimeInternalToken != "" { + managementApp.Use(func(c *fiber.Ctx) error { + path := c.Path() + if path == "/health" || path == "/api/v1/health" { + return c.Next() + } + token := strings.TrimPrefix(c.Get("Authorization"), "Bearer ") + if token == "" { + token = c.Get("X-Druid-Internal-Token") + } + if token != runtimeInternalToken { + return fiber.NewError(fiber.StatusUnauthorized, "invalid internal runtime token") + } + return c.Next() + }) + } + 
runtimehandlers.RegisterManagementRoutes(managementApp, handlers) + + var publicApp *fiber.App + if runtimePublicListen != "" { + publicApp = fiber.New(fiber.Config{DisableStartupMessage: true}) + runtimehandlers.RegisterPublicRoutes(publicApp, handlers) + } + var callbackApp *fiber.App + if runtimeWorkerCallbackListen == "" { + runtimeWorkerCallbackListen = os.Getenv("DRUID_WORKER_CALLBACK_LISTEN") + } + if runtimeWorkerCallbackListen != "" { + callbackApp = fiber.New(fiber.Config{DisableStartupMessage: true}) + callbackapi.RegisterHandlers(callbackApp, runtimeCallbackHandler{callbacks: callbacks}) + } + return listenRuntimeHTTP(managementApp, publicApp, callbackApp, runtime.Store.StateDir()) +} + +func listenRuntimeHTTP(managementApp *fiber.App, publicApp *fiber.App, callbackApp *fiber.App, stateDir string) error { + errCh := make(chan error, 4) + go func() { + errCh <- listenRuntimeDaemon(managementApp, stateDir) + }() + if runtimeListen != "" { + go func() { + logger.Log().Info("Starting runtime management listener", zap.String("listen", runtimeListen), zap.String("stateDir", stateDir)) + errCh <- managementApp.Listen(runtimeListen) + }() + } + if publicApp != nil { + go func() { + logger.Log().Info("Starting runtime public listener", zap.String("listen", runtimePublicListen), zap.String("stateDir", stateDir)) + errCh <- publicApp.Listen(runtimePublicListen) + }() + } + if callbackApp != nil { + go func() { + logger.Log().Info("Starting runtime worker callback listener", zap.String("listen", runtimeWorkerCallbackListen), zap.String("stateDir", stateDir)) + errCh <- callbackApp.Listen(runtimeWorkerCallbackListen) + }() + } + return <-errCh +} + +func listenRuntimeDaemon(app *fiber.App, stateDir string) error { + if runtimeSocket == "" { + runtimeSocket = utils.DefaultRuntimeSocketPath() + } + if err := os.MkdirAll(filepath.Dir(runtimeSocket), 0755); err != nil { + return err + } + _ = os.Remove(runtimeSocket) + listener, err := net.Listen("unix", runtimeSocket) 
+ if err != nil { + return err + } + defer os.Remove(runtimeSocket) + logger.Log().Info("Starting runtime daemon", zap.String("socket", runtimeSocket), zap.String("stateDir", stateDir)) + return app.Listener(listener) +} diff --git a/apps/druid/adapters/cli/login.go b/apps/druid/adapters/cli/login.go new file mode 100644 index 00000000..a01831b1 --- /dev/null +++ b/apps/druid/adapters/cli/login.go @@ -0,0 +1,71 @@ +package cli + +import ( + "fmt" + + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/services/registry" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var registryHost string +var registryUser string +var registryPassword string + +var LoginCommand = &cobra.Command{ + Use: "login", + Short: "Login to OCI registry", + Long: `Add or update registry credentials in the configuration. +Supports multiple registries with path-based credential matching. + +Examples: + druid login --host registry-1.docker.io -u user -p pass + druid login --host artifacts.druid.gg/project1 -u user1 -p pass1 + druid login --host artifacts.druid.gg/project2 -u user2 -p pass2`, + RunE: func(cmd *cobra.Command, args []string) error { + + if err := registry.ValidateCredentials(registryHost, registryUser, registryPassword); err != nil { + return fmt.Errorf("login failed: %w", err) + } + + cmd.Println("Login succeeded") + + var registries []domain.RegistryCredential + viper.UnmarshalKey("registries", ®istries) + + newCred := domain.RegistryCredential{ + Host: registryHost, + Username: registryUser, + Password: registryPassword, + } + + found := false + for i := range registries { + if registries[i].Host == registryHost { + registries[i] = newCred + found = true + break + } + } + + if !found { + registries = append(registries, newCred) + } + + viper.Set("registries", registries) + + return viper.WriteConfig() + }, +} + +func init() { + RootCmd.AddCommand(LoginCommand) + LoginCommand.Flags().StringVar(®istryHost, "host", "", 
"OCI registry host (e.g., artifacts.druid.gg/project1)") + LoginCommand.Flags().StringVarP(®istryUser, "user", "u", "", "username") + LoginCommand.Flags().StringVarP(®istryPassword, "password", "p", "", "User password") + + LoginCommand.MarkFlagRequired("host") + LoginCommand.MarkFlagRequired("user") + LoginCommand.MarkFlagRequired("password") +} diff --git a/apps/druid/adapters/cli/pull.go b/apps/druid/adapters/cli/pull.go new file mode 100644 index 00000000..16c8e3df --- /dev/null +++ b/apps/druid/adapters/cli/pull.go @@ -0,0 +1,38 @@ +package cli + +import ( + "github.com/highcard-dev/daemon/internal/core/services/registry" + "github.com/highcard-dev/daemon/internal/utils/logger" + "github.com/spf13/cobra" +) + +var pullNoData bool + +var PullCommand = &cobra.Command{ + Use: "pull [dir]", + Short: "Pull a scroll from an OCI registry (tag or digest)", + Args: cobra.RangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + artifact := args[0] + dir := currentWorkingDir() + if len(args) == 2 { + dir = args[1] + } + + registryClient := registry.NewOciClient(loadRegistryStore()) + + err := registryClient.PullSelective(dir, artifact, !pullNoData, nil) + if err != nil { + logger.Log().Error("Failed to pull from registry") + return err + } + + logger.Log().Info("Pulled from registry") + return nil + }, +} + +func init() { + RootCmd.AddCommand(PullCommand) + PullCommand.Flags().BoolVar(&pullNoData, "no-data", false, "Skip scroll data files") +} diff --git a/apps/druid/adapters/cli/push.go b/apps/druid/adapters/cli/push.go new file mode 100644 index 00000000..fb76238e --- /dev/null +++ b/apps/druid/adapters/cli/push.go @@ -0,0 +1,115 @@ +package cli + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/services/registry" + "github.com/highcard-dev/daemon/internal/utils" + "github.com/highcard-dev/daemon/internal/utils/logger" + 
"github.com/spf13/cobra" + "go.uber.org/zap" +) + +var pushMinRAM string +var pushMinCPU string +var pushMinDisk string +var pushImage string +var pushScrollPorts []string +var pushPackMeta bool +var pushSmart bool +var pushCategory string + +var PushCommand = &cobra.Command{ + Use: "push [artifact] [dir]", + Short: "Generate OCI Artifacts and push to a remote registry", + Args: cobra.MaximumNArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + credStore := loadRegistryStore() + + fullPath := currentWorkingDir() + artifact := "" + switch len(args) { + case 1: + if args[0] != "" { + if _, err := os.Stat(filepath.Join(args[0], "scroll.yaml")); err == nil { + fullPath = args[0] + } else { + artifact = args[0] + } + } + case 2: + artifact = args[0] + fullPath = args[1] + } + + scroll, err := domain.NewScroll(fullPath) + + if err != nil { + return err + } + + repo := scroll.Name + tag := scroll.AppVersion + + if artifact != "" { + repo, tag = utils.SplitArtifact(artifact) + } + + logger.Log().Info("Pushing "+repo+":"+tag+" to registry", zap.String("path", fullPath)) + + ociClient := registry.NewOciClient(credStore) + + overrides := map[string]string{} + if pushMinRAM != "" { + overrides["gg.druid.scroll.minRam"] = pushMinRAM + } + if pushMinCPU != "" { + overrides["gg.druid.scroll.minCpu"] = pushMinCPU + } + if pushMinDisk != "" { + overrides["gg.druid.scroll.minDisk"] = pushMinDisk + } + if pushImage != "" { + overrides["gg.druid.scroll.image"] = pushImage + } + if pushSmart { + overrides["gg.druid.scroll.smart"] = "true" + } + if pushCategory != "" { + overrides["gg.druid.scroll.category"] = pushCategory + } + for _, p := range pushScrollPorts { + parts := strings.Split(p, "=") + name := parts[0] + port := "0" + if len(parts) == 2 { + port = parts[1] + } + overrides[fmt.Sprintf("gg.druid.scroll.port.%s", name)] = port + } + + _, err = ociClient.Push(fullPath, repo, tag, overrides, pushPackMeta, &scroll.File) + if err != nil { + return err + } + + 
logger.Log().Info("Pushed "+scroll.Name+" to registry", zap.String("path", fullPath)) + return nil + }, +} + +func init() { + RootCmd.AddCommand(PushCommand) + PushCommand.Flags().StringVarP(&pushMinRAM, "min-ram", "r", pushMinRAM, "Minimum RAM required to run the application. (Will be added as a manifest annotation gg.druid.scroll.minRam)") + PushCommand.Flags().StringVarP(&pushMinCPU, "min-cpu", "c", pushMinCPU, "Minimum CPU required to run the application. (Will be added as a manifest annotation gg.druid.scroll.minCpu)") + PushCommand.Flags().StringVarP(&pushMinDisk, "min-disk", "d", pushMinDisk, "Minimum Disk required to run the application. (Will be added as a manifest annotation gg.druid.scroll.minDisk)") + PushCommand.Flags().BoolVarP(&pushSmart, "smart", "s", false, "Indicates, if the scroll is able to run as a smart deployment (Will be added as a manifest annotation gg.druid.scroll.smart)") + PushCommand.Flags().StringVar(&pushCategory, "category", pushCategory, "Category of the scroll. (Will be added as a manifest annotation gg.druid.scroll.category)") + PushCommand.Flags().StringVarP(&pushImage, "image", "i", pushImage, "Image to use for the scroll. (Will be added as a manifest annotation gg.druid.scroll.image)") + PushCommand.Flags().StringSliceVarP(&pushScrollPorts, "port", "p", pushScrollPorts, "Ports to expose. 
Format webserver=80, dns=53/udp or just ftp (Will be added as a manifest annotation gg.druid.scroll.ports.)") + PushCommand.Flags().BoolVarP(&pushPackMeta, "pack-meta", "m", pushPackMeta, "Pack the meta folder into the scroll.") +} diff --git a/apps/druid/adapters/cli/push_category.go b/apps/druid/adapters/cli/push_category.go new file mode 100644 index 00000000..aee18447 --- /dev/null +++ b/apps/druid/adapters/cli/push_category.go @@ -0,0 +1,44 @@ +package cli + +import ( + "github.com/highcard-dev/daemon/internal/core/services/registry" + "github.com/highcard-dev/daemon/internal/utils/logger" + "github.com/spf13/cobra" + "go.uber.org/zap" +) + +var pushCategoryNamePattern string + +var PushCategoryCommand = &cobra.Command{ + Use: "category", + Short: "Push locale markdown files (e.g. de-DE.md) from a scroll directory as separate OCI layers.", + Args: cobra.RangeArgs(2, 3), + RunE: func(cmd *cobra.Command, args []string) error { + credStore := loadRegistryStore() + + repo := args[0] + category := args[1] + scrollDir := currentWorkingDir() + if len(args) == 3 { + scrollDir = args[2] + } + + logger.Log().Info("Pushing "+repo+" category to registry", zap.String("scrollDir", scrollDir)) + + ociClient := registry.NewOciClient(credStore) + + _, err := ociClient.PushCategory(scrollDir, repo, category) + + if err != nil { + return err + } + + logger.Log().Info("Pushed " + repo + " category to registry") + return nil + }, +} + +func init() { + PushCommand.AddCommand(PushCategoryCommand) + PushCategoryCommand.Flags().StringVar(&pushCategoryNamePattern, "match", "", "Regexp matching file basenames to push (default: locale markdown like de-DE.md)") +} diff --git a/apps/druid/adapters/cli/root.go b/apps/druid/adapters/cli/root.go index 21f0f722..a27f1d4c 100644 --- a/apps/druid/adapters/cli/root.go +++ b/apps/druid/adapters/cli/root.go @@ -3,6 +3,10 @@ package cli import ( "os" + "github.com/highcard-dev/daemon/apps/druid/adapters/cli/client" + 
"github.com/highcard-dev/daemon/apps/druid/adapters/daemonclient" + "github.com/highcard-dev/daemon/internal/api" + "github.com/highcard-dev/daemon/internal/utils" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -10,7 +14,9 @@ import ( var envPath string var configFile string var runtimeStateDir string -var runtimeBackend string +var runtimeBackendName string +var daemonSocket string +var daemonURL string var RootCmd = &cobra.Command{ Use: "druid", @@ -25,15 +31,19 @@ var RootCmd = &cobra.Command{ func init() { cobra.OnInitialize(initConfig) - RootCmd.AddCommand(ServeCommand) - RootCmd.AddCommand(UpdateCommand) - RootCmd.AddCommand(AppVersionCmd) - RootCmd.AddCommand(VersionCmd) - RootCmd.AddCommand(ValidateCmd) - RootCmd.PersistentFlags().StringVarP(&envPath, "env-file", "e", "./.env", "Path to environment file (.env)") RootCmd.PersistentFlags().StringVar(&configFile, "config", "", "Path to config file (default: ~/.druid.yaml)") + RootCmd.PersistentFlags().StringVar(&daemonSocket, "daemon-socket", utils.DefaultRuntimeSocketPath(), "Runtime daemon Unix socket path for REST-backed commands") + RootCmd.PersistentFlags().StringVar(&daemonURL, "daemon-url", "", "Runtime daemon HTTP URL for REST-backed commands") + client.Register(RootCmd, client.Config{ + Daemon: func() (client.RuntimeDaemon, error) { + return daemonclient.NewOpenAPIClientForTarget(daemonSocket, daemonURL) + }, + RegistryCredentials: func() []api.RegistryCredential { + return client.RegistryCredentials(loadRegistryStore().Credentials()) + }, + }) } func initConfig() { diff --git a/apps/druid/adapters/cli/root_test.go b/apps/druid/adapters/cli/root_test.go index fafda90e..a181a4e9 100644 --- a/apps/druid/adapters/cli/root_test.go +++ b/apps/druid/adapters/cli/root_test.go @@ -2,27 +2,44 @@ package cli import "testing" -func TestRootCommandDoesNotExposeOCICommands(t *testing.T) { - for _, name := range []string{"pull", "push", "login"} { - if cmd, _, err := RootCmd.Find([]string{name}); err == nil 
&& cmd != nil && cmd.Name() == name { - t.Fatalf("druid should not expose %q", name) +func TestRootCommandExposesRuntimeAndOCICommands(t *testing.T) { + for _, name := range []string{"pull", "push", "login", "dev"} { + if cmd, _, err := RootCmd.Find([]string{name}); err != nil || cmd == nil || cmd.Name() != name { + t.Fatalf("druid should expose %q", name) } } + if cmd, _, err := RootCmd.Find([]string{"worker", "pull"}); err != nil || cmd == nil || cmd.Name() != "pull" { + t.Fatalf("druid should expose worker pull") + } + if cmd, _, err := RootCmd.Find([]string{"worker", "push"}); err != nil || cmd == nil || cmd.Name() != "push" { + t.Fatalf("druid should expose worker push") + } } -func TestServeCommandExposesRuntimeListeners(t *testing.T) { +func TestDaemonCommandExposesRuntimeListeners(t *testing.T) { for _, name := range []string{"tcp", "port"} { - if flag := ServeCommand.Flags().Lookup(name); flag != nil { - t.Fatalf("druid serve should not expose --%s", name) + if flag := DaemonCommand.Flags().Lookup(name); flag != nil { + t.Fatalf("druid daemon should not expose --%s", name) } } - for _, name := range []string{"socket", "listen", "public-listen", "internal-token"} { - if flag := ServeCommand.Flags().Lookup(name); flag == nil { - t.Fatalf("druid serve should expose --%s", name) + for _, name := range []string{"socket", "listen", "public-listen", "internal-token", "worker-callback-listen", "worker-callback-url", "docker-storage", "docker-bind-root", "docker-volume-prefix"} { + if flag := DaemonCommand.Flags().Lookup(name); flag == nil { + t.Fatalf("druid daemon should expose --%s", name) } } } +func TestRootCommandExposesDaemonTargets(t *testing.T) { + for _, name := range []string{"daemon-url", "daemon-socket"} { + if flag := RootCmd.PersistentFlags().Lookup(name); flag == nil { + t.Fatalf("druid should expose --%s", name) + } + } + if flag := RootCmd.PersistentFlags().Lookup("lo" + "cal"); flag != nil { + t.Fatal("druid should not expose local direct 
execution") + } +} + func TestRootCommandDoesNotExposeCWDFlag(t *testing.T) { if flag := RootCmd.PersistentFlags().Lookup("cwd"); flag != nil { t.Fatal("druid should not expose --cwd") diff --git a/apps/druid/adapters/cli/runtime_client.go b/apps/druid/adapters/cli/runtime_client.go new file mode 100644 index 00000000..c5404304 --- /dev/null +++ b/apps/druid/adapters/cli/runtime_client.go @@ -0,0 +1,21 @@ +package cli + +import ( + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/services/registry" + "github.com/spf13/viper" +) + +func loadRegistryStore() *registry.CredentialStore { + var registries []domain.RegistryCredential + viper.UnmarshalKey("registries", ®istries) + if len(registries) == 0 { + host := viper.GetString("registry.host") + user := viper.GetString("registry.user") + password := viper.GetString("registry.password") + if host != "" { + registries = append(registries, domain.RegistryCredential{Host: host, Username: user, Password: password}) + } + } + return registry.NewCredentialStore(registries) +} diff --git a/apps/druid/adapters/cli/serve.go b/apps/druid/adapters/cli/serve.go deleted file mode 100644 index 72e0388f..00000000 --- a/apps/druid/adapters/cli/serve.go +++ /dev/null @@ -1,151 +0,0 @@ -package cli - -import ( - "net" - "os" - "path/filepath" - "strings" - - "github.com/gofiber/fiber/v2" - runtimehandlers "github.com/highcard-dev/daemon/apps/druid/adapters/http/handlers" - appservices "github.com/highcard-dev/daemon/apps/druid/core/services" - "github.com/highcard-dev/daemon/internal/core/services" - runtimebackend "github.com/highcard-dev/daemon/internal/runtime" - runtimekubernetes "github.com/highcard-dev/daemon/internal/runtime/kubernetes" - "github.com/highcard-dev/daemon/internal/utils" - "github.com/highcard-dev/daemon/internal/utils/logger" - "github.com/spf13/cobra" - "go.uber.org/zap" -) - -var runtimeSocket string -var k8sNamespace string -var k8sStorageClass string -var 
k8sPullImage string -var k8sRegistrySecret string -var hubbleRelayAddr string -var k8sKubeconfig string -var runtimeListen string -var runtimePublicListen string -var runtimeInternalToken string - -var ServeCommand = &cobra.Command{ - Use: "serve", - Short: "Run the multi-scroll runtime daemon", - Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runRuntimeDaemon() - }, -} - -func init() { - ServeCommand.Flags().StringVar(&runtimeSocket, "socket", utils.DefaultRuntimeSocketPath(), "Runtime daemon Unix socket path") - ServeCommand.Flags().StringVar(&runtimeListen, "listen", "", "Optional management HTTP listen address, for example :8081") - ServeCommand.Flags().StringVar(&runtimePublicListen, "public-listen", "", "Optional public dashboard HTTP listen address, for example :8082") - ServeCommand.Flags().StringVar(&runtimeInternalToken, "internal-token", "", "Optional bearer token required for management HTTP API requests") - ServeCommand.Flags().StringVar(&runtimeStateDir, "state-dir", "", "Runtime state directory (default: ~/.druid/runtime)") - ServeCommand.Flags().StringVar(&runtimeBackend, "runtime", "docker", "Default runtime backend. 
Valid values: docker, kubernetes") - ServeCommand.Flags().StringVar(&k8sNamespace, "k8s-namespace", "", "Kubernetes namespace for runtime resources (default: service account namespace or DRUID_K8S_NAMESPACE)") - ServeCommand.Flags().StringVar(&k8sStorageClass, "k8s-storage-class", "", "Kubernetes storage class for runtime PVCs (default: DRUID_K8S_STORAGE_CLASS)") - ServeCommand.Flags().StringVar(&k8sPullImage, "k8s-pull-image", "", "Kubernetes image used for OCI pull materialization Jobs (default: DRUID_K8S_PULL_IMAGE)") - ServeCommand.Flags().StringVar(&k8sRegistrySecret, "k8s-registry-secret", "", "Kubernetes imagePullSecret used by runtime Jobs (default: DRUID_K8S_REGISTRY_SECRET)") - ServeCommand.Flags().StringVar(&k8sKubeconfig, "k8s-kubeconfig", "", "Kubernetes kubeconfig path for out-of-cluster runtime access (default: DRUID_K8S_KUBECONFIG, KUBECONFIG, or ~/.kube/config)") - ServeCommand.Flags().StringVar(&hubbleRelayAddr, "hubble-relay-addr", "", "Hubble Relay gRPC address for Kubernetes port traffic (default: DRUID_HUBBLE_RELAY_ADDR or hubble-relay.kube-system.svc.cluster.local:80)") -} - -func runRuntimeDaemon() error { - kubernetesConfig := runtimekubernetes.Config{ - Namespace: k8sNamespace, - StorageClass: k8sStorageClass, - PullImage: k8sPullImage, - RegistrySecret: k8sRegistrySecret, - HubbleRelayAddr: hubbleRelayAddr, - Kubeconfig: k8sKubeconfig, - } - store, err := appservices.NewRuntimeStoreForBackend(runtimeStateDir, runtimeBackend, kubernetesConfig) - if err != nil { - return err - } - manager := services.NewRuntimeScrollManager(store) - logManager := services.NewLogManager() - consoleService := services.NewConsoleManager(logManager) - supervisor := appservices.NewRuntimeSupervisor(store, manager, consoleService, runtimeBackend, runtimebackend.WithKubernetesConfig(kubernetesConfig)) - if err := supervisor.Start(); err != nil { - return err - } - - if runtimeInternalToken == "" { - runtimeInternalToken = os.Getenv("DRUID_INTERNAL_TOKEN") - } - 
handlers := runtimehandlers.RouteHandlers{ - Server: runtimehandlers.NewRuntimeServer( - runtimehandlers.NewHealthHandler(), - runtimehandlers.NewScrollHandler(supervisor, consoleService, logManager), - ), - Websocket: runtimehandlers.NewWebsocketHandler(consoleService), - } - - managementApp := fiber.New(fiber.Config{DisableStartupMessage: true}) - if runtimeInternalToken != "" { - managementApp.Use(func(c *fiber.Ctx) error { - path := c.Path() - if path == "/health" || path == "/api/v1/health" { - return c.Next() - } - token := strings.TrimPrefix(c.Get("Authorization"), "Bearer ") - if token == "" { - token = c.Get("X-Druid-Internal-Token") - } - if token != runtimeInternalToken { - return fiber.NewError(fiber.StatusUnauthorized, "invalid internal runtime token") - } - return c.Next() - }) - } - runtimehandlers.RegisterManagementRoutes(managementApp, handlers) - - var publicApp *fiber.App - if runtimePublicListen != "" { - publicApp = fiber.New(fiber.Config{DisableStartupMessage: true}) - runtimehandlers.RegisterPublicRoutes(publicApp, handlers) - } - return listenRuntimeHTTP(managementApp, publicApp, store.StateDir()) -} - -func listenRuntimeHTTP(managementApp *fiber.App, publicApp *fiber.App, stateDir string) error { - errCh := make(chan error, 2) - if runtimeListen != "" { - go func() { - logger.Log().Info("Starting runtime management listener", zap.String("listen", runtimeListen), zap.String("stateDir", stateDir)) - errCh <- managementApp.Listen(runtimeListen) - }() - } else { - go func() { - errCh <- listenRuntimeDaemon(managementApp, stateDir) - }() - } - if publicApp != nil { - go func() { - logger.Log().Info("Starting runtime public listener", zap.String("listen", runtimePublicListen), zap.String("stateDir", stateDir)) - errCh <- publicApp.Listen(runtimePublicListen) - }() - } - return <-errCh -} - -func listenRuntimeDaemon(app *fiber.App, stateDir string) error { - if runtimeSocket == "" { - runtimeSocket = utils.DefaultRuntimeSocketPath() - } - if err 
:= os.MkdirAll(filepath.Dir(runtimeSocket), 0755); err != nil { - return err - } - _ = os.Remove(runtimeSocket) - listener, err := net.Listen("unix", runtimeSocket) - if err != nil { - return err - } - defer os.Remove(runtimeSocket) - logger.Log().Info("Starting runtime daemon", zap.String("socket", runtimeSocket), zap.String("stateDir", stateDir)) - return app.Listener(listener) -} diff --git a/apps/druid/adapters/cli/update.go b/apps/druid/adapters/cli/update.go index 8c682b42..89d130b5 100644 --- a/apps/druid/adapters/cli/update.go +++ b/apps/druid/adapters/cli/update.go @@ -12,7 +12,6 @@ import ( "github.com/highcard-dev/daemon/internal/utils/logger" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/spf13/cobra" - "github.com/spf13/viper" ) var updateIncludeData bool @@ -45,14 +44,14 @@ var UpdateCommand = &cobra.Command{ return fmt.Errorf("invalid artifact reference %q (expected repo:tag or repo@sha256:digest)", artifact) } if kind == utils.ArtifactRefKindDigest { - return fmt.Errorf("update only supports tag references (repo:tag). For digests, use `druid-client pull %s`", artifact) + return fmt.Errorf("update only supports tag references (repo:tag). 
For digests, use `druid pull %s`", artifact) } tag := ref //ctx := context.Background() logger.Log().Info("Checking for updates for " + artifact) - registryClient := registry.NewOciClient(loadUpdateRegistryStore()) + registryClient := registry.NewOciClient(loadRegistryStore()) canUpdate := false @@ -92,23 +91,6 @@ var UpdateCommand = &cobra.Command{ } func init() { + RootCmd.AddCommand(UpdateCommand) UpdateCommand.Flags().BoolVar(&updateIncludeData, "include-data", false, "Also pull scroll data layers") } - -func loadUpdateRegistryStore() *registry.CredentialStore { - var registries []domain.RegistryCredential - viper.UnmarshalKey("registries", ®istries) - if len(registries) == 0 { - host := viper.GetString("registry.host") - user := viper.GetString("registry.user") - password := viper.GetString("registry.password") - if host != "" { - registries = append(registries, domain.RegistryCredential{ - Host: host, - Username: user, - Password: password, - }) - } - } - return registry.NewCredentialStore(registries) -} diff --git a/apps/druid/adapters/cli/validate.go b/apps/druid/adapters/cli/validate.go index ca5651ed..07e2a99a 100644 --- a/apps/druid/adapters/cli/validate.go +++ b/apps/druid/adapters/cli/validate.go @@ -36,5 +36,6 @@ var ValidateCmd = &cobra.Command{ } func init() { + RootCmd.AddCommand(ValidateCmd) ValidateCmd.Flags().BoolVar(&strict, "strict", false, "Enable strict validation mode") } diff --git a/apps/druid/adapters/cli/version.go b/apps/druid/adapters/cli/version.go index 9a0286b5..5fb026a1 100644 --- a/apps/druid/adapters/cli/version.go +++ b/apps/druid/adapters/cli/version.go @@ -14,3 +14,7 @@ var VersionCmd = &cobra.Command{ return nil }, } + +func init() { + RootCmd.AddCommand(VersionCmd) +} diff --git a/apps/druid/adapters/cli/worker.go b/apps/druid/adapters/cli/worker.go new file mode 100644 index 00000000..2ba67d26 --- /dev/null +++ b/apps/druid/adapters/cli/worker.go @@ -0,0 +1,12 @@ +package cli + +import "github.com/spf13/cobra" + +var 
WorkerCommand = &cobra.Command{ + Use: "worker", + Short: "Run internal Druid worker actions", +} + +func init() { + RootCmd.AddCommand(WorkerCommand) +} diff --git a/apps/druid/adapters/cli/worker_pull.go b/apps/druid/adapters/cli/worker_pull.go new file mode 100644 index 00000000..1e9b7f9c --- /dev/null +++ b/apps/druid/adapters/cli/worker_pull.go @@ -0,0 +1,320 @@ +package cli + +import ( + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/highcard-dev/daemon/internal/callbackapi" + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/ports" + coreservices "github.com/highcard-dev/daemon/internal/core/services" + "github.com/highcard-dev/daemon/internal/core/services/registry" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var workerPullAction ports.RuntimeWorkerAction +var workerPullMode string + +var WorkerPullCommand = &cobra.Command{ + Use: "pull", + Short: "Pull or update a runtime root and report the result", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + workerPullAction.Mode = ports.RuntimeWorkerMode(workerPullMode) + if workerPullAction.Mode == "" { + workerPullAction.Mode = ports.RuntimeWorkerModeCreate + } + if workerPullAction.CallbackToken == "" { + workerPullAction.CallbackToken = os.Getenv("DRUID_WORKER_TOKEN") + } + result := runWorkerPull(workerPullAction) + if result.Error != "" { + _ = reportWorkerResult(workerPullAction, result) + return fmt.Errorf("%s", result.Error) + } + return reportWorkerResult(workerPullAction, result) + }, +} + +func init() { + WorkerCommand.AddCommand(WorkerPullCommand) + WorkerPullCommand.Flags().StringVar(&workerPullAction.Artifact, "artifact", "", "OCI artifact to pull") + WorkerPullCommand.Flags().StringVar(&workerPullAction.RuntimeID, "runtime-id", "", "Runtime scroll id") + WorkerPullCommand.Flags().StringVar(&workerPullAction.MountPath, "root", "/scroll", "Mounted runtime 
root path") + WorkerPullCommand.Flags().StringVar(&workerPullAction.CallbackURL, "callback-url", "", "Daemon worker callback URL") + WorkerPullCommand.Flags().StringVar(&workerPullAction.CallbackToken, "callback-token", "", "One-time worker callback token") + WorkerPullCommand.Flags().StringVar(&workerPullMode, "mode", string(ports.RuntimeWorkerModeCreate), "Pull mode: create or update") + WorkerPullCommand.MarkFlagRequired("artifact") + WorkerPullCommand.MarkFlagRequired("runtime-id") +} + +func runWorkerPull(action ports.RuntimeWorkerAction) ports.RuntimeWorkerResult { + result := ports.RuntimeWorkerResult{} + if action.Artifact == "" { + result.Error = "artifact is required" + return result + } + root := action.MountPath + if root == "" { + root = "/scroll" + } + oci := registry.NewOciClient(loadWorkerRegistryStore()) + digest, err := oci.ResolveDigest(action.Artifact) + if err == nil { + result.ArtifactDigest = digest + } + if action.Mode == ports.RuntimeWorkerModeUpdate { + err = pullWorkerUpdate(root, action.Artifact, oci) + } else { + err = pullWorkerCreate(root, action.Artifact, oci) + } + if err != nil { + result.Error = err.Error() + return result + } + scrollYAML, err := os.ReadFile(filepath.Join(root, "scroll.yaml")) + if err != nil { + result.Error = err.Error() + return result + } + if _, err := domain.NewScrollFromBytes(root, scrollYAML); err != nil { + result.Error = err.Error() + return result + } + result.ScrollYAML = string(scrollYAML) + return result +} + +func loadWorkerRegistryStore() *registry.CredentialStore { + var config struct { + Registries []domain.RegistryCredential `json:"registries"` + } + if raw := os.Getenv("DRUID_RUNTIME_REGISTRY_CONFIG_JSON"); raw != "" { + _ = json.Unmarshal([]byte(raw), &config) + } + if len(config.Registries) == 0 { + _ = viper.UnmarshalKey("registries", &config.Registries) + } + return registry.NewCredentialStore(config.Registries) +} + +func pullWorkerCreate(root string, artifact string, oci 
ports.OciRegistryInterface) error { + if err := os.MkdirAll(root, 0755); err != nil { + return err + } + entries, err := os.ReadDir(root) + if err != nil { + return err + } + for _, entry := range entries { + if err := os.RemoveAll(filepath.Join(root, entry.Name())); err != nil { + return err + } + } + if info, err := os.Stat(artifact); err == nil { + if !info.IsDir() { + if filepath.Base(artifact) != "scroll.yaml" { + return fmt.Errorf("local file artifact must be scroll.yaml") + } + return copyPath(artifact, filepath.Join(root, "scroll.yaml")) + } + return copyPath(artifact, root) + } + return oci.PullSelective(root, artifact, true, nil) +} + +func pullWorkerUpdate(root string, artifact string, oci ports.OciRegistryInterface) error { + tmp, err := os.MkdirTemp("", "druid-worker-update-*") + if err != nil { + return err + } + defer os.RemoveAll(tmp) + if err := coreservices.MaterializeScrollArtifact(artifact, tmp, oci, true); err != nil { + return err + } + scrollYAML, err := os.ReadFile(filepath.Join(tmp, "scroll.yaml")) + if err != nil { + return err + } + scroll, err := domain.NewScrollFromBytes(tmp, scrollYAML) + if err != nil { + return err + } + skipData := map[string]bool{} + collectSkipUpdatePaths(skipData, "", scroll.Chunks) + return mergePulledRoot(tmp, root, skipData) +} + +func collectSkipUpdatePaths(out map[string]bool, parent string, chunks []*domain.Chunks) { + for _, chunk := range chunks { + if chunk == nil { + continue + } + chunkPath := filepath.ToSlash(filepath.Clean(filepath.Join(parent, filepath.FromSlash(chunk.Path)))) + if chunkPath == "." 
{ + chunkPath = "" + } + if chunk.SkipUpdate { + out[chunkPath] = true + } + collectSkipUpdatePaths(out, chunkPath, chunk.Chunks) + } +} + +func mergePulledRoot(src string, dst string, skipData map[string]bool) error { + if err := os.MkdirAll(dst, 0755); err != nil { + return err + } + entries, err := os.ReadDir(src) + if err != nil { + return err + } + for _, entry := range entries { + name := entry.Name() + srcPath := filepath.Join(src, name) + dstPath := filepath.Join(dst, name) + if name == domain.RuntimeDataDir { + if err := copyDataUpdate(srcPath, dstPath, skipData); err != nil { + return err + } + continue + } + if err := os.RemoveAll(dstPath); err != nil { + return err + } + if err := copyPath(srcPath, dstPath); err != nil { + return err + } + } + return nil +} + +func copyDataUpdate(srcData string, dstData string, skipData map[string]bool) error { + return filepath.WalkDir(srcData, func(srcPath string, entry os.DirEntry, err error) error { + if err != nil { + return err + } + rel, err := filepath.Rel(srcData, srcPath) + if err != nil { + return err + } + if rel == "." 
{ + return os.MkdirAll(dstData, 0755) + } + rel = filepath.ToSlash(rel) + if shouldSkipWorkerUpdate(rel, skipData) { + if entry.IsDir() { + return filepath.SkipDir + } + return nil + } + target := filepath.Join(dstData, filepath.FromSlash(rel)) + if entry.IsDir() { + info, err := entry.Info() + if err != nil { + return err + } + return os.MkdirAll(target, info.Mode().Perm()) + } + return copyPath(srcPath, target) + }) +} + +func shouldSkipWorkerUpdate(rel string, skipData map[string]bool) bool { + rel = filepath.ToSlash(filepath.Clean(rel)) + for skip := range skipData { + if skip == "" || rel == skip || strings.HasPrefix(rel, skip+"/") { + return true + } + } + return false +} + +func copyPath(src string, dst string) error { + info, err := os.Stat(src) + if err != nil { + return err + } + if info.IsDir() { + return filepath.WalkDir(src, func(path string, entry os.DirEntry, walkErr error) error { + if walkErr != nil { + return walkErr + } + rel, err := filepath.Rel(src, path) + if err != nil { + return err + } + target := filepath.Join(dst, rel) + if entry.IsDir() { + info, err := entry.Info() + if err != nil { + return err + } + return os.MkdirAll(target, info.Mode().Perm()) + } + return copyPath(path, target) + }) + } + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + out, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, info.Mode().Perm()) + if err != nil { + return err + } + defer out.Close() + _, err = io.Copy(out, in) + return err +} + +func reportWorkerResult(action ports.RuntimeWorkerAction, result ports.RuntimeWorkerResult) error { + if action.CallbackURL == "" { + body, err := json.Marshal(result) + if err != nil { + return err + } + fmt.Println(string(body)) + return nil + } + suffix := "/internal/v1/workers/" + action.RuntimeID + "/complete" + base := strings.TrimSuffix(action.CallbackURL, suffix) + if base == action.CallbackURL || 
base == "" { + return fmt.Errorf("worker callback URL %q must end with %s", action.CallbackURL, suffix) + } + client, err := callbackapi.NewClientWithResponses(base) + if err != nil { + return err + } + body := callbackapi.WorkerResult{ + ArtifactDigest: workerString(result.ArtifactDigest), + Error: workerString(result.Error), + ScrollYaml: workerString(result.ScrollYAML), + Token: action.CallbackToken, + } + res, err := client.CompleteWorkerWithResponse(context.Background(), action.RuntimeID, body) + if err != nil { + return err + } + if res.StatusCode() >= 400 { + return fmt.Errorf("worker callback returned %d: %s", res.StatusCode(), strings.TrimSpace(string(res.Body))) + } + return nil +} + +func workerString(value string) *string { + if value == "" { + return nil + } + return &value +} diff --git a/apps/druid/adapters/cli/worker_push.go b/apps/druid/adapters/cli/worker_push.go new file mode 100644 index 00000000..5e0b1cdc --- /dev/null +++ b/apps/druid/adapters/cli/worker_push.go @@ -0,0 +1,42 @@ +package cli + +import ( + "fmt" + + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/services/registry" + "github.com/highcard-dev/daemon/internal/utils" + "github.com/spf13/cobra" +) + +var workerPushArtifact string +var workerPushRoot string + +var WorkerPushCommand = &cobra.Command{ + Use: "push", + Short: "Push a mounted runtime root as an OCI artifact", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + if workerPushArtifact == "" { + return fmt.Errorf("artifact is required") + } + if workerPushRoot == "" { + workerPushRoot = "/scroll" + } + scroll, err := domain.NewScroll(workerPushRoot) + if err != nil { + return err + } + repo, tag := utils.SplitArtifact(workerPushArtifact) + oci := registry.NewOciClient(loadWorkerRegistryStore()) + _, err = oci.Push(workerPushRoot, repo, tag, nil, false, &scroll.File) + return err + }, +} + +func init() { + 
WorkerCommand.AddCommand(WorkerPushCommand) + WorkerPushCommand.Flags().StringVar(&workerPushArtifact, "artifact", "", "OCI artifact to push") + WorkerPushCommand.Flags().StringVar(&workerPushRoot, "root", "/scroll", "Mounted runtime root path") + WorkerPushCommand.MarkFlagRequired("artifact") +} diff --git a/apps/druid/adapters/cli/worker_test.go b/apps/druid/adapters/cli/worker_test.go new file mode 100644 index 00000000..ce4f149b --- /dev/null +++ b/apps/druid/adapters/cli/worker_test.go @@ -0,0 +1,114 @@ +package cli + +import ( + "os" + "path/filepath" + "testing" + + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/spf13/cobra" +) + +func TestWorkerPullCommandUsesRuntimeIDOnly(t *testing.T) { + if flag := WorkerPullCommand.Flags().Lookup("runtime-id"); flag == nil { + t.Fatal("worker pull should expose --runtime-id") + } + if flag := WorkerPullCommand.Flags().Lookup("action-id"); flag != nil { + t.Fatal("worker pull should not expose --action-id") + } +} + +func TestWorkerPullCommandRequiresRuntimeID(t *testing.T) { + flag := WorkerPullCommand.Flags().Lookup("runtime-id") + if flag == nil { + t.Fatal("worker pull should expose --runtime-id") + } + if got := flag.Annotations[cobra.BashCompOneRequiredFlag]; len(got) != 1 || got[0] != "true" { + t.Fatalf("runtime-id required annotation = %#v, want true", got) + } +} + +func TestWorkerUpdateMergePreservesSkipUpdateAndExtraFiles(t *testing.T) { + src := t.TempDir() + dst := t.TempDir() + mustWrite(t, filepath.Join(src, "scroll.yaml"), "name: next\n") + mustWrite(t, filepath.Join(src, "data", "keep", "state.txt"), "new") + mustWrite(t, filepath.Join(src, "data", "overwrite.txt"), "new") + mustWrite(t, filepath.Join(dst, "scroll.yaml"), "name: old\n") + mustWrite(t, filepath.Join(dst, "data", "keep", "state.txt"), "old") + mustWrite(t, filepath.Join(dst, "data", "overwrite.txt"), "old") + mustWrite(t, filepath.Join(dst, "data", "extra.txt"), "extra") + + if err := mergePulledRoot(src, dst, 
map[string]bool{"keep": true}); err != nil { + t.Fatal(err) + } + assertFile(t, filepath.Join(dst, "scroll.yaml"), "name: next\n") + assertFile(t, filepath.Join(dst, "data", "keep", "state.txt"), "old") + assertFile(t, filepath.Join(dst, "data", "overwrite.txt"), "new") + assertFile(t, filepath.Join(dst, "data", "extra.txt"), "extra") +} + +func TestWorkerCollectSkipUpdatePaths(t *testing.T) { + root := filepath.Join(t.TempDir(), "root") + mustWrite(t, filepath.Join(root, "scroll.yaml"), `name: skip-test +desc: test +version: 0.1.0 +app_version: "1" +serve: start +chunks: + - name: saves + path: saves + skip_update: true + - name: parent + path: server + chunks: + - name: cache + path: cache + skip_update: true +commands: + start: + procedures: + - image: alpine:3.20 + command: ["true"] +`) + result := runWorkerPullForSkipPathTest(t, root) + if !result["saves"] || !result["server/cache"] { + t.Fatalf("skip paths = %#v", result) + } +} + +func runWorkerPullForSkipPathTest(t *testing.T, root string) map[string]bool { + t.Helper() + scrollYAML, err := os.ReadFile(filepath.Join(root, "scroll.yaml")) + if err != nil { + t.Fatal(err) + } + scroll, err := domain.NewScrollFromBytes(root, scrollYAML) + if err != nil { + t.Fatal(err) + } + result := map[string]bool{} + collectSkipUpdatePaths(result, "", scroll.Chunks) + return result +} + +func mustWrite(t *testing.T, path string, data string) { + t.Helper() + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(path, []byte(data), 0644); err != nil { + t.Fatal(err) + } +} + +func assertFile(t *testing.T, path string, want string) { + t.Helper() + data, err := os.ReadFile(path) + if err != nil { + t.Fatal(err) + } + if string(data) != want { + t.Fatalf("%s = %q, want %q", path, string(data), want) + } +} diff --git a/apps/druid-client/adapters/daemon/openapi_client.go b/apps/druid/adapters/daemonclient/openapi_client.go similarity index 62% rename from 
apps/druid-client/adapters/daemon/openapi_client.go rename to apps/druid/adapters/daemonclient/openapi_client.go index ba9ba302..51e0b88d 100644 --- a/apps/druid-client/adapters/daemon/openapi_client.go +++ b/apps/druid/adapters/daemonclient/openapi_client.go @@ -1,11 +1,15 @@ -package daemon +package daemonclient import ( + "bytes" "context" + "encoding/json" "errors" "fmt" + "io" "net" "net/http" + "net/url" "strings" "github.com/highcard-dev/daemon/internal/api" @@ -15,10 +19,24 @@ import ( var ErrMaterializationUnsupported = errors.New("daemon materialization unsupported") type OpenAPIClient struct { - client *api.ClientWithResponses + client *api.ClientWithResponses + server string + httpClient *http.Client } func NewOpenAPIClient(daemonSocket string) (*OpenAPIClient, error) { + return NewOpenAPIClientForTarget(daemonSocket, "") +} + +func NewOpenAPIClientForTarget(daemonSocket string, daemonURL string) (*OpenAPIClient, error) { + if daemonURL != "" { + server := strings.TrimRight(daemonURL, "/") + client, err := api.NewClientWithResponses(server) + if err != nil { + return nil, err + } + return &OpenAPIClient{client: client, server: server, httpClient: http.DefaultClient}, nil + } if daemonSocket == "" { daemonSocket = utils.DefaultRuntimeSocketPath() } @@ -27,33 +45,27 @@ func NewOpenAPIClient(daemonSocket string) (*OpenAPIClient, error) { return (&net.Dialer{}).DialContext(ctx, "unix", daemonSocket) }, } - client, err := api.NewClientWithResponses("http://druid", api.WithHTTPClient(&http.Client{Transport: transport})) + httpClient := &http.Client{Transport: transport} + client, err := api.NewClientWithResponses("http://druid", api.WithHTTPClient(httpClient)) if err != nil { return nil, err } - return &OpenAPIClient{client: client}, nil + return &OpenAPIClient{client: client, server: "http://druid", httpClient: httpClient}, nil } -func (c *OpenAPIClient) CreateScroll(ctx context.Context, name string, artifact string, scrollRoot string, dataRoot string, start 
bool) (*api.RuntimeScroll, error) { +func (c *OpenAPIClient) CreateScroll(ctx context.Context, name string, artifact string, registryCredentials []api.RegistryCredential) (*api.RuntimeScroll, error) { var requestName *string if name != "" { requestName = &name } - var requestScrollRoot *string - if scrollRoot != "" { - requestScrollRoot = &scrollRoot - } - var requestDataRoot *string - if dataRoot != "" { - requestDataRoot = &dataRoot - } - res, err := c.client.CreateScrollWithResponse(ctx, api.CreateScrollJSONRequestBody{ - Artifact: artifact, - Name: requestName, - ScrollRoot: requestScrollRoot, - DataRoot: requestDataRoot, - Start: &start, - }) + request := api.CreateScrollJSONRequestBody{ + Artifact: artifact, + Name: requestName, + } + if len(registryCredentials) > 0 { + request.RegistryCredentials = ®istryCredentials + } + res, err := c.client.CreateScrollWithResponse(ctx, request) if err != nil { return nil, err } @@ -174,6 +186,55 @@ func (c *OpenAPIClient) ApplyScrollRouting(ctx context.Context, id string, assig return res.JSON200, nil } +func (c *OpenAPIClient) EnableWatch(ctx context.Context, id string, request api.DevWatchRequest) (*api.DevWatchResponse, error) { + var out api.DevWatchResponse + return &out, c.doJSON(ctx, http.MethodPost, fmt.Sprintf("/api/v1/scrolls/%s/dev/enable", url.PathEscape(id)), request, &out) +} + +func (c *OpenAPIClient) DisableWatch(ctx context.Context, id string) (*api.DevWatchResponse, error) { + var out api.DevWatchResponse + return &out, c.doJSON(ctx, http.MethodPost, fmt.Sprintf("/api/v1/scrolls/%s/dev/disable", url.PathEscape(id)), nil, &out) +} + +func (c *OpenAPIClient) WatchStatus(ctx context.Context, id string) (*api.DevWatchStatus, error) { + var out api.DevWatchStatus + return &out, c.doJSON(ctx, http.MethodGet, fmt.Sprintf("/api/v1/scrolls/%s/dev/status", url.PathEscape(id)), nil, &out) +} + +func (c *OpenAPIClient) doJSON(ctx context.Context, method string, path string, body any, out any) error { + var reader 
io.Reader + if body != nil { + data, err := json.Marshal(body) + if err != nil { + return err + } + reader = bytes.NewReader(data) + } + req, err := http.NewRequestWithContext(ctx, method, c.server+path, reader) + if err != nil { + return err + } + if body != nil { + req.Header.Set("Content-Type", "application/json") + } + resp, err := c.httpClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + data, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + if err := ensureStatus(resp.StatusCode, data); err != nil { + return err + } + if out == nil || len(data) == 0 { + return nil + } + return json.Unmarshal(data, out) +} + func ensureStatus(statusCode int, body []byte) error { if statusCode < 400 { return nil diff --git a/apps/druid-client/adapters/daemon/openapi_client_test.go b/apps/druid/adapters/daemonclient/openapi_client_test.go similarity index 66% rename from apps/druid-client/adapters/daemon/openapi_client_test.go rename to apps/druid/adapters/daemonclient/openapi_client_test.go index 223b4077..d94945b3 100644 --- a/apps/druid-client/adapters/daemon/openapi_client_test.go +++ b/apps/druid/adapters/daemonclient/openapi_client_test.go @@ -1,4 +1,4 @@ -package daemon +package daemonclient import ( "encoding/json" @@ -9,10 +9,8 @@ import ( "github.com/highcard-dev/daemon/internal/api" ) -func TestCreateScrollSendsStartFalse(t *testing.T) { - var got struct { - Start *bool `json:"start"` - } +func TestCreateScrollDoesNotSendStart(t *testing.T) { + var got map[string]interface{} server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/api/v1/scrolls" { t.Fatalf("path = %s, want /api/v1/scrolls", r.URL.Path) @@ -22,7 +20,7 @@ func TestCreateScrollSendsStartFalse(t *testing.T) { } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusCreated) - _, _ = 
w.Write([]byte(`{"id":"scroll-a","artifact":"artifact","scroll_root":"/root","data_root":"/root","scroll_name":"scroll","status":"created","created_at":"2026-05-10T00:00:00Z","updated_at":"2026-05-10T00:00:00Z"}`)) + _, _ = w.Write([]byte(`{"id":"scroll-a","artifact":"artifact","root":"/root","scroll_name":"scroll","status":"created","created_at":"2026-05-10T00:00:00Z","updated_at":"2026-05-10T00:00:00Z"}`)) })) defer server.Close() client, err := api.NewClientWithResponses(server.URL) @@ -31,10 +29,10 @@ func TestCreateScrollSendsStartFalse(t *testing.T) { } openAPIClient := &OpenAPIClient{client: client} - if _, err := openAPIClient.CreateScroll(t.Context(), "scroll-a", "artifact", "", "", false); err != nil { + if _, err := openAPIClient.CreateScroll(t.Context(), "scroll-a", "artifact", nil); err != nil { t.Fatal(err) } - if got.Start == nil || *got.Start { - t.Fatalf("start = %#v, want false", got.Start) + if _, ok := got["start"]; ok { + t.Fatalf("create request should not send start: %#v", got) } } diff --git a/apps/druid/adapters/http/handlers/auth.go b/apps/druid/adapters/http/handlers/auth.go new file mode 100644 index 00000000..6f660acd --- /dev/null +++ b/apps/druid/adapters/http/handlers/auth.go @@ -0,0 +1,71 @@ +package handlers + +import ( + "github.com/gofiber/contrib/websocket" + "github.com/gofiber/fiber/v2" + "github.com/highcard-dev/daemon/internal/core/ports" +) + +const ownerLocal = "druid-owner-id" + +func (h *ScrollHandler) PublicAuth(c *fiber.Ctx) error { + if h.authorizer == nil { + return c.Next() + } + auth, err := h.authorizer.CheckHeader(c) + if err != nil { + return fiber.NewError(fiber.StatusUnauthorized, err.Error()) + } + if auth == nil { + return c.Next() + } + c.Locals(ownerLocal, auth.Subject) + id := c.Params("id") + if id == "" { + return c.Next() + } + if err := h.authorizeRuntimeOwner(id, auth.Subject); err != nil { + return err + } + return c.Next() +} + +func (h *ScrollHandler) authorizeRuntimeOwner(id string, subject 
string) error { + if subject == "" { + return fiber.NewError(fiber.StatusUnauthorized, "missing subject") + } + runtimeScroll, err := h.supervisor.Get(id) + if err != nil { + return err + } + if runtimeScroll.OwnerID != "" && runtimeScroll.OwnerID != subject { + return fiber.NewError(fiber.StatusForbidden, "runtime owner mismatch") + } + return nil +} + +func (h *WebsocketHandler) PublicQueryAuth(c *websocket.Conn) bool { + if h.authorizer == nil { + return true + } + if _, err := h.authorizer.CheckQuery(c.Params("id"), c.Query("token")); err != nil { + return false + } + return true +} + +type jwksProvider interface { + JWKS() map[string]any +} + +func RuntimeJWKS(authorizer ports.AuthorizerServiceInterface) fiber.Handler { + return func(c *fiber.Ctx) error { + if authorizer == nil { + return c.JSON(map[string]any{"keys": []any{}}) + } + if provider, ok := authorizer.(jwksProvider); ok { + return c.JSON(provider.JWKS()) + } + return c.JSON(map[string]any{"keys": []any{}}) + } +} diff --git a/apps/druid/adapters/http/handlers/dev_handler.go b/apps/druid/adapters/http/handlers/dev_handler.go new file mode 100644 index 00000000..141eb2d2 --- /dev/null +++ b/apps/druid/adapters/http/handlers/dev_handler.go @@ -0,0 +1,120 @@ +package handlers + +import ( + "time" + + "github.com/gofiber/contrib/websocket" + "github.com/gofiber/fiber/v2" + appservices "github.com/highcard-dev/daemon/apps/druid/core/services" + "github.com/highcard-dev/daemon/internal/api" + "github.com/highcard-dev/daemon/internal/core/domain" +) + +func (h *ScrollHandler) CreateDaemonToken(c *fiber.Ctx) error { + if h == nil || h.authorizer == nil { + return c.JSON(map[string]string{"token": ""}) + } + runtimeScroll, err := h.getScroll(c.Params("id")) + if err != nil { + return err + } + ownerID := runtimeScroll.OwnerID + if subject, ok := c.Locals(ownerLocal).(string); ok && subject != "" { + ownerID = subject + } + if h.authorizer == nil { + return c.JSON(map[string]string{"token": ""}) + } + return 
c.JSON(map[string]string{"token": h.authorizer.GenerateQueryToken(runtimeScroll.ID, ownerID)}) +} + +func (h *ScrollHandler) AddDaemonCommand(c *fiber.Ctx) error { + var request domain.CommandInstructionSet + if err := c.BodyParser(&request); err != nil { + return fiber.NewError(fiber.StatusBadRequest, err.Error()) + } + if err := h.supervisor.AddCommand(c.Params("id"), c.Params("command"), &request); err != nil { + return err + } + return c.SendStatus(fiber.StatusNoContent) +} + +func (h *ScrollHandler) GetDaemonWatchStatus(c *fiber.Ctx) error { + status, err := h.supervisor.DevWatchStatus(c.Params("id")) + if err != nil { + return err + } + return c.JSON(status) +} + +func (h *ScrollHandler) EnableDaemonWatch(c *fiber.Ctx) error { + var request appservices.DevWatchRequest + if len(c.Body()) > 0 { + if err := c.BodyParser(&request); err != nil { + return fiber.NewError(fiber.StatusBadRequest, err.Error()) + } + } + status, err := h.supervisor.EnableDevWatch(c.Params("id"), request) + if err != nil { + return err + } + return c.JSON(api.DevWatchResponse{Status: "enabled", Enabled: status.Enabled, WatchedPaths: status.WatchedPaths}) +} + +func (h *ScrollHandler) DisableDaemonWatch(c *fiber.Ctx) error { + status, err := h.supervisor.DisableDevWatch(c.Params("id")) + if err != nil { + return err + } + return c.JSON(api.DevWatchResponse{Status: "disabled", Enabled: status.Enabled, WatchedPaths: status.WatchedPaths}) +} + +func (h *WebsocketHandler) WatchNotifications(c *websocket.Conn) { + defer c.Close() + if h.scrolls == nil { + return + } + subscription, unsubscribe, err := h.scrolls.supervisor.SubscribeDevWatch(c.Params("id")) + if err != nil { + return + } + defer unsubscribe() + + done := make(chan struct{}) + go func() { + defer close(done) + for { + if _, _, err := c.ReadMessage(); err != nil { + return + } + } + }() + + pingTicker := time.NewTicker(30 * time.Second) + defer pingTicker.Stop() + for { + select { + case <-done: + return + case data, ok := 
<-subscription: + if !ok || data == nil { + return + } + if err := c.WriteMessage(websocket.TextMessage, *data); err != nil { + return + } + case <-pingTicker.C: + if err := c.WriteMessage(websocket.PingMessage, nil); err != nil { + return + } + } + } +} + +func (h *WebsocketHandler) WatchNotificationsPublic(c *websocket.Conn) { + if !h.PublicQueryAuth(c) { + _ = c.Close() + return + } + h.WatchNotifications(c) +} diff --git a/apps/druid/adapters/http/handlers/routes.go b/apps/druid/adapters/http/handlers/routes.go index 2ed32cb5..29d26b62 100644 --- a/apps/druid/adapters/http/handlers/routes.go +++ b/apps/druid/adapters/http/handlers/routes.go @@ -3,7 +3,9 @@ package handlers import ( "github.com/gofiber/contrib/websocket" "github.com/gofiber/fiber/v2" + "github.com/gofiber/fiber/v2/middleware/cors" "github.com/highcard-dev/daemon/internal/api" + "github.com/highcard-dev/daemon/internal/core/ports" ) type RouteHandlers struct { @@ -29,13 +31,34 @@ func RegisterManagementRoutes(app *fiber.App, handlers RouteHandlers) { api.RegisterHandlersWithOptions(app, handlers.Server, api.FiberServerOptions{}) app.Get("/health", handlers.Server.GetHealthAuth) app.Get("/ws/v1/scrolls/:id/consoles/:console", websocket.New(handlers.Websocket.AttachConsole)) + app.Get("/ws/v1/scrolls/:id/watch/notify", websocket.New(handlers.Websocket.WatchNotifications)) + app.Get("/api/v1/scrolls/:id/dev/status", handlers.Server.GetDaemonWatchStatus) + app.Post("/api/v1/scrolls/:id/dev/enable", handlers.Server.EnableDaemonWatch) + app.Post("/api/v1/scrolls/:id/dev/disable", handlers.Server.DisableDaemonWatch) } func RegisterPublicRoutes(app *fiber.App, handlers RouteHandlers) { + var authorizer ports.AuthorizerServiceInterface + if handlers.Server != nil && handlers.Server.ScrollHandler != nil { + authorizer = handlers.Server.ScrollHandler.authorizer + } + app.Use(cors.New(cors.Config{ + AllowOrigins: "*", + AllowMethods: "GET,POST,PUT,DELETE,PATCH,OPTIONS,HEAD,PROPFIND,MOVE,MKCOL,COPY", + 
AllowHeaders: "Origin,Content-Type,Accept,Authorization,X-Requested-With,Cache-Control,DNT,Keep-Alive,User-Agent,If-Modified-Since,Depth,Destination,Overwrite,If,Lock-Token,Timeout,Dav", + ExposeHeaders: "Druid-Version", + })) app.Get("/health", handlers.Server.GetHealthAuth) + app.Get("/.well-known/jwks.json", RuntimeJWKS(authorizer)) app.Get("/:id/ws/v1/serve/:console", websocket.New(handlers.Websocket.AttachScrollConsole)) + app.Get("/:id/ws/v1/watch/notify", websocket.New(handlers.Websocket.WatchNotificationsPublic)) + if handlers.Server != nil && handlers.Server.ScrollHandler != nil { + app.Use("/:id", handlers.Server.PublicAuth) + } app.Get("/:id/api/v1/health", handlers.Server.GetHealthAuth) + app.Get("/:id/api/v1/token", handlers.Server.CreateDaemonToken) app.Get("/:id/api/v1/scroll", handlers.Server.GetDaemonScroll) + app.Put("/:id/api/v1/scroll/commands/:command", handlers.Server.AddDaemonCommand) app.Post("/:id/api/v1/command", handlers.Server.RunDaemonCommand) app.Get("/:id/api/v1/queue", handlers.Server.GetDaemonQueue) app.Get("/:id/api/v1/procedures", handlers.Server.GetDaemonProcedures) @@ -43,5 +66,7 @@ func RegisterPublicRoutes(app *fiber.App, handlers RouteHandlers) { app.Get("/:id/api/v1/logs", handlers.Server.GetDaemonLogs) app.Get("/:id/api/v1/logs/:stream", handlers.Server.GetDaemonStreamLogs) app.Get("/:id/api/v1/ports", handlers.Server.GetDaemonPorts) - app.All("/:id/webdav/*", handlers.Server.ServeDaemonWebDAV) + app.Get("/:id/api/v1/watch/status", handlers.Server.GetDaemonWatchStatus) + app.Post("/:id/api/v1/watch/enable", handlers.Server.EnableDaemonWatch) + app.Post("/:id/api/v1/watch/disable", handlers.Server.DisableDaemonWatch) } diff --git a/apps/druid/adapters/http/handlers/routes_test.go b/apps/druid/adapters/http/handlers/routes_test.go index c2486836..6fb78a15 100644 --- a/apps/druid/adapters/http/handlers/routes_test.go +++ b/apps/druid/adapters/http/handlers/routes_test.go @@ -28,11 +28,30 @@ func 
TestRouteSplitKeepsManagementAndPublicSurfacesSeparate(t *testing.T) { if status := requestStatus(t, public, "/api/v1/scrolls"); status != http.StatusNotFound { t.Fatalf("public management list status = %d, want 404", status) } - if status := requestStatus(t, public, "/scroll-1/api/v1/token"); status != http.StatusNotFound { - t.Fatalf("public token compatibility route status = %d, want 404", status) + if status := requestStatus(t, public, "/scroll-1/api/v1/token"); status != http.StatusOK { + t.Fatalf("public token compatibility route status = %d, want 200", status) } - if status := requestStatus(t, public, "/scroll-1/api/v1/watch/status"); status != http.StatusNotFound { - t.Fatalf("public watch compatibility route status = %d, want 404", status) +} + +func TestPublicRoutesAnswerCorsPreflight(t *testing.T) { + handlers := RouteHandlers{Server: NewRuntimeServer(NewHealthHandler(), nil), Websocket: &WebsocketHandler{}} + public := fiber.New(fiber.Config{DisableStartupMessage: true}) + RegisterPublicRoutes(public, handlers) + + req := httptest.NewRequest(http.MethodOptions, "/scroll-1/api/v1/watch/status", nil) + req.Header.Set("Origin", "http://127.0.0.1:3000") + req.Header.Set("Access-Control-Request-Method", http.MethodGet) + req.Header.Set("Access-Control-Request-Headers", "authorization") + resp, err := public.Test(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusNoContent { + t.Fatalf("preflight status = %d, want 204", resp.StatusCode) + } + if got := resp.Header.Get("Access-Control-Allow-Origin"); got != "*" { + t.Fatalf("allow origin = %q, want *", got) } } diff --git a/apps/druid/adapters/http/handlers/scroll_handler.go b/apps/druid/adapters/http/handlers/scroll_handler.go index 75bd968e..c9406cc4 100644 --- a/apps/druid/adapters/http/handlers/scroll_handler.go +++ b/apps/druid/adapters/http/handlers/scroll_handler.go @@ -2,16 +2,13 @@ package handlers import ( "errors" - "mime" - "os" - "path/filepath" 
- "strconv" "strings" "github.com/gofiber/fiber/v2" appservices "github.com/highcard-dev/daemon/apps/druid/core/services" "github.com/highcard-dev/daemon/internal/api" "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/ports" "github.com/highcard-dev/daemon/internal/core/services" ) @@ -19,32 +16,35 @@ type ScrollHandler struct { supervisor *appservices.RuntimeSupervisor consoleService *services.ConsoleManager logService *services.LogManager + authorizer ports.AuthorizerServiceInterface } -func NewScrollHandler(supervisor *appservices.RuntimeSupervisor, consoleService *services.ConsoleManager, logService *services.LogManager) *ScrollHandler { +func NewScrollHandler(supervisor *appservices.RuntimeSupervisor, consoleService *services.ConsoleManager, logService *services.LogManager, authorizer ...ports.AuthorizerServiceInterface) *ScrollHandler { + var auth ports.AuthorizerServiceInterface + if len(authorizer) > 0 { + auth = authorizer[0] + } return &ScrollHandler{ supervisor: supervisor, consoleService: consoleService, logService: logService, + authorizer: auth, } } -func runtimeRoots(scrollRoot *string, dataRoot *string) (string, string, error) { - scroll := "" - if scrollRoot != nil { - scroll = *scrollRoot - } - data := "" - if dataRoot != nil { - data = *dataRoot - } - if scroll == "" && data == "" { - return "", "", nil +func registryCredentials(in *[]api.RegistryCredential) []domain.RegistryCredential { + if in == nil || len(*in) == 0 { + return nil } - if scroll == "" || data == "" || scroll != data { - return "", "", errors.New("scroll_root and data_root are legacy fields and must be omitted or equal") + out := make([]domain.RegistryCredential, 0, len(*in)) + for _, credential := range *in { + out = append(out, domain.RegistryCredential{ + Host: credential.Host, + Username: credential.Username, + Password: credential.Password, + }) } - return scroll, scroll, nil + return out } func (h *ScrollHandler) 
ListScrolls(c *fiber.Ctx) error { @@ -66,15 +66,11 @@ func (h *ScrollHandler) CreateScroll(c *fiber.Ctx) error { } else if request.Id != nil && *request.Id != "" { name = *request.Id } - scrollRoot, dataRoot, err := runtimeRoots(request.ScrollRoot, request.DataRoot) - if err != nil { - return fiber.NewError(fiber.StatusBadRequest, err.Error()) - } - start := true - if request.Start != nil { - start = *request.Start + ownerID := "" + if request.OwnerId != nil { + ownerID = *request.OwnerId } - runtimeScroll, err := h.supervisor.Create(request.Artifact, name, scrollRoot, dataRoot, start) + runtimeScroll, err := h.supervisor.CreateWithOwner(request.Artifact, name, ownerID, registryCredentials(request.RegistryCredentials)) if err != nil { if errors.Is(err, services.ErrScrollAlreadyExists) { return fiber.NewError(fiber.StatusConflict, err.Error()) @@ -88,32 +84,21 @@ func (h *ScrollHandler) CreateScroll(c *fiber.Ctx) error { } func (h *ScrollHandler) EnsureScroll(c *fiber.Ctx) error { - var request struct { - ID *string `json:"id"` - Name *string `json:"name"` - Artifact string `json:"artifact"` - ScrollRoot *string `json:"scroll_root"` - DataRoot *string `json:"data_root"` - Start *bool `json:"start"` - } + var request api.EnsureScrollRequest if err := c.BodyParser(&request); err != nil { return fiber.NewError(fiber.StatusBadRequest, err.Error()) } name := "" if request.Name != nil && *request.Name != "" { name = *request.Name - } else if request.ID != nil && *request.ID != "" { - name = *request.ID - } - scrollRoot, dataRoot, err := runtimeRoots(request.ScrollRoot, request.DataRoot) - if err != nil { - return fiber.NewError(fiber.StatusBadRequest, err.Error()) + } else if request.Id != nil && *request.Id != "" { + name = *request.Id } - start := true - if request.Start != nil { - start = *request.Start + ownerID := "" + if request.OwnerId != nil { + ownerID = *request.OwnerId } - runtimeScroll, err := h.supervisor.Ensure(request.Artifact, name, scrollRoot, dataRoot, 
start) + runtimeScroll, err := h.supervisor.EnsureWithOwner(request.Artifact, name, ownerID, registryCredentials(request.RegistryCredentials)) if err != nil { if errors.Is(err, appservices.ErrRuntimeMaterializationUnsupported) { return fiber.NewError(fiber.StatusNotImplemented, err.Error()) @@ -291,45 +276,6 @@ func (h *ScrollHandler) GetDaemonPorts(c *fiber.Ctx) error { return h.GetScrollPorts(c, c.Params("id")) } -func (h *ScrollHandler) ServeDaemonWebDAV(c *fiber.Ctx) error { - c.Set("DAV", "1") - c.Set("Allow", "OPTIONS, GET, HEAD, PUT") - if c.Method() == fiber.MethodOptions { - return c.SendStatus(fiber.StatusNoContent) - } - relativePath := strings.TrimPrefix(c.Params("*"), "/") - if c.Method() == fiber.MethodPut { - if err := h.supervisor.WriteDataFile(c.Params("id"), relativePath, c.Body()); err != nil { - if errors.Is(err, appservices.ErrRuntimeOperationUnsupported) { - return fiber.NewError(fiber.StatusNotImplemented, err.Error()) - } - return err - } - return c.SendStatus(fiber.StatusNoContent) - } - if c.Method() != fiber.MethodGet && c.Method() != fiber.MethodHead { - return fiber.NewError(fiber.StatusMethodNotAllowed, "unsupported runtime WebDAV method") - } - data, err := h.supervisor.DataFile(c.Params("id"), relativePath) - if err != nil { - if errors.Is(err, os.ErrNotExist) || strings.Contains(err.Error(), "No such file") { - return fiber.NewError(fiber.StatusNotFound, err.Error()) - } - if errors.Is(err, appservices.ErrRuntimeOperationUnsupported) { - return fiber.NewError(fiber.StatusNotImplemented, err.Error()) - } - return err - } - if contentType := mime.TypeByExtension(filepath.Ext(relativePath)); contentType != "" { - c.Set(fiber.HeaderContentType, contentType) - } - c.Set(fiber.HeaderContentLength, strconv.Itoa(len(data))) - if c.Method() == fiber.MethodHead { - return c.SendStatus(fiber.StatusOK) - } - return c.Send(data) -} - func (h *ScrollHandler) GetScrollPorts(c *fiber.Ctx, id string) error { runtimeScroll, err := h.getScroll(id) if 
err != nil { @@ -348,9 +294,6 @@ func (h *ScrollHandler) GetScrollRoutingTargets(c *fiber.Ctx, id string) error { } targets, err := h.supervisor.RoutingTargets(id) if err != nil { - if errors.Is(err, appservices.ErrRuntimeOperationUnsupported) { - return fiber.NewError(fiber.StatusNotImplemented, err.Error()) - } return err } return c.JSON(targets) @@ -377,17 +320,12 @@ func (h *ScrollHandler) BackupScroll(c *fiber.Ctx, id string) error { if _, err := h.getScroll(id); err != nil { return err } - var request struct { - Artifact string `json:"artifact"` - } + var request api.RuntimeArtifactOperationRequest if err := c.BodyParser(&request); err != nil { return fiber.NewError(fiber.StatusBadRequest, err.Error()) } - runtimeScroll, err := h.supervisor.Backup(id, request.Artifact) + runtimeScroll, err := h.supervisor.Backup(id, request.Artifact, registryCredentials(request.RegistryCredentials)) if err != nil { - if errors.Is(err, appservices.ErrRuntimeOperationUnsupported) { - return fiber.NewError(fiber.StatusNotImplemented, err.Error()) - } return err } return c.JSON(runtimeScroll) @@ -397,18 +335,16 @@ func (h *ScrollHandler) RestoreScroll(c *fiber.Ctx, id string) error { if _, err := h.getScroll(id); err != nil { return err } - var request struct { - Artifact string `json:"artifact"` - Restart bool `json:"restart"` - } + var request api.RuntimeArtifactOperationRequest if err := c.BodyParser(&request); err != nil { return fiber.NewError(fiber.StatusBadRequest, err.Error()) } - runtimeScroll, err := h.supervisor.Restore(id, request.Artifact, request.Restart) + restart := false + if request.Restart != nil { + restart = *request.Restart + } + runtimeScroll, err := h.supervisor.Restore(id, request.Artifact, restart, registryCredentials(request.RegistryCredentials)) if err != nil { - if errors.Is(err, appservices.ErrRuntimeOperationUnsupported) { - return fiber.NewError(fiber.StatusNotImplemented, err.Error()) - } return err } return c.JSON(runtimeScroll) diff --git 
a/apps/druid/adapters/http/handlers/scroll_handler_test.go b/apps/druid/adapters/http/handlers/scroll_handler_test.go deleted file mode 100644 index cdf9fa2b..00000000 --- a/apps/druid/adapters/http/handlers/scroll_handler_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package handlers - -import "testing" - -func TestRuntimeRootsAcceptsOmittedOrEqualLegacyRoots(t *testing.T) { - scrollRoot, dataRoot, err := runtimeRoots(nil, nil) - if err != nil { - t.Fatal(err) - } - if scrollRoot != "" || dataRoot != "" { - t.Fatalf("roots = %q/%q, want empty", scrollRoot, dataRoot) - } - - root := "/runtime/root" - scrollRoot, dataRoot, err = runtimeRoots(&root, &root) - if err != nil { - t.Fatal(err) - } - if scrollRoot != root || dataRoot != root { - t.Fatalf("roots = %q/%q, want %q", scrollRoot, dataRoot, root) - } -} - -func TestRuntimeRootsRejectsSplitLegacyRoots(t *testing.T) { - scrollRoot := "/runtime/spec" - dataRoot := "/runtime/data" - if _, _, err := runtimeRoots(&scrollRoot, &dataRoot); err == nil { - t.Fatal("expected split roots to fail") - } -} diff --git a/apps/druid/adapters/http/handlers/websocket_handler.go b/apps/druid/adapters/http/handlers/websocket_handler.go index 0875565d..60004a4e 100644 --- a/apps/druid/adapters/http/handlers/websocket_handler.go +++ b/apps/druid/adapters/http/handlers/websocket_handler.go @@ -4,6 +4,7 @@ import ( "time" "github.com/gofiber/contrib/websocket" + "github.com/highcard-dev/daemon/internal/core/ports" "github.com/highcard-dev/daemon/internal/core/services" "github.com/highcard-dev/daemon/internal/utils/logger" "go.uber.org/zap" @@ -11,12 +12,22 @@ import ( type WebsocketHandler struct { consoleService *services.ConsoleManager + scrolls *ScrollHandler + authorizer ports.AuthorizerServiceInterface } func NewWebsocketHandler(consoleService *services.ConsoleManager) *WebsocketHandler { return &WebsocketHandler{consoleService: consoleService} } +func (h *WebsocketHandler) SetScrollHandler(scrolls *ScrollHandler) { + h.scrolls = 
scrolls +} + +func (h *WebsocketHandler) SetAuthorizer(authorizer ports.AuthorizerServiceInterface) { + h.authorizer = authorizer +} + func (h *WebsocketHandler) AttachConsole(c *websocket.Conn) { consoleID := c.Params("console") if id := c.Params("id"); id != "" { @@ -26,6 +37,10 @@ func (h *WebsocketHandler) AttachConsole(c *websocket.Conn) { } func (h *WebsocketHandler) AttachScrollConsole(c *websocket.Conn) { + if !h.PublicQueryAuth(c) { + _ = c.Close() + return + } h.AttachConsole(c) } diff --git a/apps/druid-client/adapters/websocket/attacher.go b/apps/druid/adapters/websocketclient/attacher.go similarity index 98% rename from apps/druid-client/adapters/websocket/attacher.go rename to apps/druid/adapters/websocketclient/attacher.go index 15e5fd40..905c5a5d 100644 --- a/apps/druid-client/adapters/websocket/attacher.go +++ b/apps/druid/adapters/websocketclient/attacher.go @@ -1,4 +1,4 @@ -package websocket +package websocketclient import ( "context" diff --git a/apps/druid/core/services/runtime_access.go b/apps/druid/core/services/runtime_access.go new file mode 100644 index 00000000..d07ea520 --- /dev/null +++ b/apps/druid/core/services/runtime_access.go @@ -0,0 +1,90 @@ +package services + +import ( + "context" + + "github.com/highcard-dev/daemon/internal/core/domain" +) + +func (s *RuntimeSupervisor) Run(id string, command string) (*domain.RuntimeScroll, error) { + session, err := s.sessionFor(id) + if err != nil { + return nil, err + } + return session.Run(command) +} + +func (s *RuntimeSupervisor) Ports(id string) ([]domain.RuntimePortStatus, error) { + session, err := s.sessionFor(id) + if err != nil { + return nil, err + } + return session.Ports() +} + +func (s *RuntimeSupervisor) RoutingTargets(id string) ([]domain.RuntimeRoutingTarget, error) { + session, err := s.sessionFor(id) + if err != nil { + return nil, err + } + return session.RoutingTargets() +} + +func (s *RuntimeSupervisor) ApplyRouting(id string, assignments []domain.RuntimeRouteAssignment) 
(*domain.RuntimeScroll, error) { + session, err := s.sessionFor(id) + if err != nil { + return nil, err + } + return session.ApplyRouting(assignments) +} + +func (s *RuntimeSupervisor) Backup(id string, artifact string, registryCredentials []domain.RegistryCredential) (*domain.RuntimeScroll, error) { + session, err := s.sessionFor(id) + if err != nil { + return nil, err + } + if err := session.Backup(context.Background(), artifact, registryCredentials); err != nil { + session.markError(err) + return nil, err + } + return s.store.GetScroll(id) +} + +func (s *RuntimeSupervisor) Restore(id string, artifact string, restart bool, registryCredentials []domain.RegistryCredential) (*domain.RuntimeScroll, error) { + session, err := s.sessionFor(id) + if err != nil { + return nil, err + } + if err := session.Restore(context.Background(), artifact, registryCredentials); err != nil { + session.markError(err) + return nil, err + } + if restart { + return s.StartScroll(id) + } + return s.store.GetScroll(id) +} + +func (s *RuntimeSupervisor) ScrollFile(id string) (*domain.File, error) { + session, err := s.sessionFor(id) + if err != nil { + return nil, err + } + return session.scrollService.GetFile(), nil +} + +func (s *RuntimeSupervisor) Queue(id string) (map[string]domain.ScrollLockStatus, error) { + session, err := s.sessionFor(id) + if err != nil { + return nil, err + } + return session.queueManager.GetQueue(), nil +} + +func (s *RuntimeSupervisor) Procedures(id string) (map[string]domain.ScrollLockStatus, error) { + session, err := s.sessionFor(id) + if err != nil { + return nil, err + } + return session.Procedures(), nil +} diff --git a/apps/druid/core/services/runtime_controller.go b/apps/druid/core/services/runtime_controller.go deleted file mode 100644 index 7e6f6e4c..00000000 --- a/apps/druid/core/services/runtime_controller.go +++ /dev/null @@ -1,919 +0,0 @@ -package services - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "os" - "path/filepath" - 
"strings" - "sync" - "time" - - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/core/ports" - coreservices "github.com/highcard-dev/daemon/internal/core/services" - runtimebackend "github.com/highcard-dev/daemon/internal/runtime" - runtimekubernetes "github.com/highcard-dev/daemon/internal/runtime/kubernetes" - "github.com/highcard-dev/daemon/internal/utils" - "github.com/highcard-dev/daemon/internal/utils/logger" - "go.uber.org/zap" -) - -var ErrRuntimeMaterializationUnsupported = errors.New("runtime backend does not support daemon materialization") -var ErrRuntimeOperationUnsupported = errors.New("runtime backend does not support this operation") - -var newKubernetesRuntimeStore = func(config runtimekubernetes.Config) (coreservices.RuntimeScrollStore, error) { - return runtimekubernetes.NewConfigMapStateStore(config) -} - -func NewRuntimeStore(stateDir string) (coreservices.RuntimeScrollStore, error) { - if stateDir == "" { - defaultStateDir, err := utils.DefaultRuntimeStateDir() - if err != nil { - return nil, err - } - stateDir = defaultStateDir - } - return coreservices.NewRuntimeStateStore(stateDir), nil -} - -func NewRuntimeStoreForBackend(stateDir string, runtimeBackend string, kubernetesConfig runtimekubernetes.Config) (coreservices.RuntimeScrollStore, error) { - if runtimeBackend == "kubernetes" { - return newKubernetesRuntimeStore(kubernetesConfig) - } - return NewRuntimeStore(stateDir) -} - -func LoadRuntimeScroll(stateDir string, id string) (coreservices.RuntimeScrollStore, *domain.RuntimeScroll, error) { - store, err := NewRuntimeStore(stateDir) - if err != nil { - return nil, nil, err - } - runtimeScroll, err := store.GetScroll(id) - if err != nil { - if errors.Is(err, coreservices.ErrScrollNotFound) { - return nil, nil, fmt.Errorf("runtime scroll %s not found", id) - } - return nil, nil, err - } - return store, runtimeScroll, nil -} - -type RuntimeSupervisor struct { - store 
coreservices.RuntimeScrollStore - manager *coreservices.RuntimeScrollManager - consoleService *coreservices.ConsoleManager - runtimeBackend string - runtimeOptions runtimebackend.Options - - mu sync.Mutex - sessions map[string]*RuntimeSession -} - -func NewRuntimeSupervisor( - store coreservices.RuntimeScrollStore, - manager *coreservices.RuntimeScrollManager, - consoleService *coreservices.ConsoleManager, - runtimeBackend string, - options ...runtimebackend.Option, -) *RuntimeSupervisor { - runtimeOptions := runtimebackend.Options{} - for _, option := range options { - option(&runtimeOptions) - } - return &RuntimeSupervisor{ - store: store, - manager: manager, - consoleService: consoleService, - runtimeBackend: runtimeBackend, - runtimeOptions: runtimeOptions, - sessions: map[string]*RuntimeSession{}, - } -} - -func (s *RuntimeSupervisor) Start() error { - scrolls, err := s.store.ListScrolls() - if err != nil { - return err - } - for _, runtimeScroll := range scrolls { - if runtimeScroll.Status == domain.RuntimeScrollStatusDeleted { - continue - } - session, err := s.startSession(runtimeScroll) - if err != nil { - s.markScrollError(runtimeScroll, err) - continue - } - if err := session.Hydrate(); err != nil { - s.markScrollError(runtimeScroll, err) - continue - } - } - return nil -} - -func (s *RuntimeSupervisor) Create(artifact string, name string, scrollRoot string, dataRoot string, start bool) (*domain.RuntimeScroll, error) { - return s.create(artifact, name, scrollRoot, dataRoot, start) -} - -func (s *RuntimeSupervisor) create(artifact string, name string, scrollRoot string, dataRoot string, autoStart bool) (*domain.RuntimeScroll, error) { - runtimeService, err := runtimebackend.NewBackend(s.runtimeBackend, s.consoleService, runtimebackend.WithKubernetesConfig(s.runtimeOptions.Kubernetes)) - if err != nil { - return nil, err - } - var scrollYAML []byte - if scrollRoot == "" && dataRoot == "" { - materializer, ok := 
runtimeService.(ports.RuntimeMaterializerInterface) - if !ok { - return nil, ErrRuntimeMaterializationUnsupported - } - materialized, err := materializer.MaterializeScroll(context.Background(), artifact, name) - if err != nil { - return nil, err - } - if materialized.Artifact != "" { - artifact = materialized.Artifact - } - scrollRoot = materialized.ScrollRoot - dataRoot = materialized.DataRoot - scrollYAML = materialized.ScrollYAML - } else { - scrollYAML, err = runtimeService.ReadScrollFile(scrollRoot) - if err != nil { - return nil, err - } - } - runtimeScroll, err := s.manager.Create(artifact, name, scrollRoot, dataRoot, scrollYAML) - if err != nil { - return nil, err - } - session, err := s.startSession(runtimeScroll) - if err != nil { - runtimeScroll.Status = domain.RuntimeScrollStatusError - runtimeScroll.LastError = err.Error() - _ = s.store.UpdateScroll(runtimeScroll) - return nil, err - } - if autoStart { - if err := session.AutoStartServe(); err != nil { - runtimeScroll.Status = domain.RuntimeScrollStatusError - runtimeScroll.LastError = err.Error() - _ = s.store.UpdateScroll(runtimeScroll) - return nil, err - } - } - return runtimeScroll, nil -} - -func (s *RuntimeSupervisor) Ensure(artifact string, name string, scrollRoot string, dataRoot string, start bool) (*domain.RuntimeScroll, error) { - id := coreservices.RuntimeScrollIDFromName(name) - if id != "" { - runtimeScroll, err := s.store.GetScroll(id) - if err == nil { - if start { - return s.StartScroll(runtimeScroll.ID) - } - return runtimeScroll, nil - } - if !errors.Is(err, coreservices.ErrScrollNotFound) { - return nil, err - } - } - runtimeScroll, err := s.create(artifact, name, scrollRoot, dataRoot, start) - if err != nil { - return nil, err - } - return runtimeScroll, nil -} - -func (s *RuntimeSupervisor) List() ([]*domain.RuntimeScroll, error) { - return s.store.ListScrolls() -} - -func (s *RuntimeSupervisor) Get(id string) (*domain.RuntimeScroll, error) { - return s.store.GetScroll(id) -} - 
-func (s *RuntimeSupervisor) Delete(id string) error { - return s.DeleteWithPolicy(id, false) -} - -func (s *RuntimeSupervisor) DeleteWithPolicy(id string, purgeData bool) error { - s.mu.Lock() - session := s.sessions[id] - delete(s.sessions, id) - s.mu.Unlock() - if session == nil { - var err error - session, err = s.sessionFor(id) - if err != nil { - return err - } - s.mu.Lock() - delete(s.sessions, id) - s.mu.Unlock() - } - if err := session.DeleteRuntime(purgeData); err != nil { - return err - } - session.Shutdown() - return s.store.DeleteScroll(id) -} - -func (s *RuntimeSupervisor) StartScroll(id string) (*domain.RuntimeScroll, error) { - session, err := s.sessionFor(id) - if err != nil { - return nil, err - } - if err := session.AutoStartServe(); err != nil { - session.markError(err) - return nil, err - } - session.mu.Lock() - session.runtimeScroll.Status = deriveRuntimeScrollStatus(session.runtimeScroll.Commands, session.scrollService.GetFile().Commands) - if session.runtimeScroll.Status == domain.RuntimeScrollStatusCreated { - session.runtimeScroll.Status = domain.RuntimeScrollStatusRunning - } - session.runtimeScroll.LastError = "" - err = s.store.UpdateScroll(session.runtimeScroll) - id = session.runtimeScroll.ID - session.mu.Unlock() - if err != nil { - return nil, err - } - return s.store.GetScroll(id) -} - -func (s *RuntimeSupervisor) Stop(id string) (*domain.RuntimeScroll, error) { - s.mu.Lock() - session := s.sessions[id] - delete(s.sessions, id) - s.mu.Unlock() - if session == nil { - var err error - session, err = s.sessionFor(id) - if err != nil { - return nil, err - } - s.mu.Lock() - delete(s.sessions, id) - s.mu.Unlock() - } - if err := session.StopRuntime(); err != nil { - session.markError(err) - return nil, err - } - session.Shutdown() - return s.store.GetScroll(id) -} - -func (s *RuntimeSupervisor) Run(id string, command string) (*domain.RuntimeScroll, error) { - session, err := s.sessionFor(id) - if err != nil { - return nil, err - } - 
return session.Run(command) -} - -func (s *RuntimeSupervisor) Ports(id string) ([]domain.RuntimePortStatus, error) { - session, err := s.sessionFor(id) - if err != nil { - return nil, err - } - return session.Ports() -} - -func (s *RuntimeSupervisor) RoutingTargets(id string) ([]domain.RuntimeRoutingTarget, error) { - session, err := s.sessionFor(id) - if err != nil { - return nil, err - } - return session.RoutingTargets() -} - -func (s *RuntimeSupervisor) ApplyRouting(id string, assignments []domain.RuntimeRouteAssignment) (*domain.RuntimeScroll, error) { - session, err := s.sessionFor(id) - if err != nil { - return nil, err - } - return session.ApplyRouting(assignments) -} - -func (s *RuntimeSupervisor) Backup(id string, artifact string) (*domain.RuntimeScroll, error) { - session, err := s.sessionFor(id) - if err != nil { - return nil, err - } - if err := session.Backup(context.Background(), artifact); err != nil { - session.markError(err) - return nil, err - } - return s.store.GetScroll(id) -} - -func (s *RuntimeSupervisor) Restore(id string, artifact string, restart bool) (*domain.RuntimeScroll, error) { - session, err := s.sessionFor(id) - if err != nil { - return nil, err - } - if err := session.Restore(context.Background(), artifact); err != nil { - session.markError(err) - return nil, err - } - if restart { - return s.StartScroll(id) - } - return s.store.GetScroll(id) -} - -func (s *RuntimeSupervisor) DataFile(id string, relativePath string) ([]byte, error) { - runtimeScroll, err := s.store.GetScroll(id) - if err != nil { - return nil, err - } - runtimeService, err := runtimebackend.NewBackend(s.runtimeBackend, s.consoleService, runtimebackend.WithKubernetesConfig(s.runtimeOptions.Kubernetes)) - if err != nil { - return nil, err - } - fileBackend, ok := runtimeService.(ports.RuntimeFileBackendInterface) - if !ok { - return nil, ErrRuntimeOperationUnsupported - } - return fileBackend.ReadDataFile(context.Background(), runtimeScroll.DataRoot, relativePath) -} 
- -func (s *RuntimeSupervisor) WriteDataFile(id string, relativePath string, data []byte) error { - runtimeScroll, err := s.store.GetScroll(id) - if err != nil { - return err - } - runtimeService, err := runtimebackend.NewBackend(s.runtimeBackend, s.consoleService, runtimebackend.WithKubernetesConfig(s.runtimeOptions.Kubernetes)) - if err != nil { - return err - } - fileBackend, ok := runtimeService.(ports.RuntimeFileBackendInterface) - if !ok { - return ErrRuntimeOperationUnsupported - } - return fileBackend.WriteDataFile(context.Background(), runtimeScroll.DataRoot, relativePath, data) -} - -func (s *RuntimeSupervisor) ScrollFile(id string) (*domain.File, error) { - session, err := s.sessionFor(id) - if err != nil { - return nil, err - } - return session.scrollService.GetFile(), nil -} - -func (s *RuntimeSupervisor) Queue(id string) (map[string]domain.ScrollLockStatus, error) { - session, err := s.sessionFor(id) - if err != nil { - return nil, err - } - return session.queueManager.GetQueue(), nil -} - -func (s *RuntimeSupervisor) Procedures(id string) (map[string]domain.ScrollLockStatus, error) { - session, err := s.sessionFor(id) - if err != nil { - return nil, err - } - return session.queueManager.GetQueue(), nil -} - -func (s *RuntimeSupervisor) sessionFor(id string) (*RuntimeSession, error) { - s.mu.Lock() - session := s.sessions[id] - s.mu.Unlock() - if session != nil { - return session, nil - } - runtimeScroll, err := s.store.GetScroll(id) - if err != nil { - return nil, err - } - return s.startSession(runtimeScroll) -} - -func (s *RuntimeSupervisor) startSession(runtimeScroll *domain.RuntimeScroll) (*RuntimeSession, error) { - s.mu.Lock() - if session := s.sessions[runtimeScroll.ID]; session != nil { - s.mu.Unlock() - return session, nil - } - s.mu.Unlock() - - session, err := NewRuntimeSession(s.store, runtimeScroll, s.consoleService, s.runtimeBackend, runtimebackend.WithKubernetesConfig(s.runtimeOptions.Kubernetes)) - if err != nil { - return nil, err - 
} - session.Start() - - s.mu.Lock() - if existing := s.sessions[runtimeScroll.ID]; existing != nil { - s.mu.Unlock() - session.Shutdown() - return existing, nil - } - s.sessions[runtimeScroll.ID] = session - s.mu.Unlock() - return session, nil -} - -func (s *RuntimeSupervisor) markScrollError(runtimeScroll *domain.RuntimeScroll, err error) { - logger.Log().Error("failed to restore runtime scroll", zap.String("scroll", runtimeScroll.ID), zap.Error(err)) - runtimeScroll.Status = domain.RuntimeScrollStatusError - runtimeScroll.LastError = err.Error() - if runtimeScroll.Commands == nil { - runtimeScroll.Commands = map[string]domain.LockStatus{} - } - _ = s.store.UpdateScroll(runtimeScroll) -} - -type RuntimeSession struct { - store coreservices.RuntimeScrollStore - runtimeScroll *domain.RuntimeScroll - scrollService *coreservices.ScrollService - queueManager *coreservices.QueueManager - runtimeBackend ports.RuntimeBackendInterface - - mu sync.Mutex - started bool -} - -func NewRuntimeSession( - store coreservices.RuntimeScrollStore, - runtimeScroll *domain.RuntimeScroll, - consoleService *coreservices.ConsoleManager, - runtimeBackend string, - options ...runtimebackend.Option, -) (*RuntimeSession, error) { - runtimeService, err := runtimebackend.NewBackend(runtimeBackend, consoleService, options...) 
- if err != nil { - return nil, err - } - if runtimeScroll.DataRoot == "" { - return nil, fmt.Errorf("runtime scroll %s has no data root", runtimeScroll.ID) - } - scrollYAML := []byte(runtimeScroll.ScrollYAML) - if len(scrollYAML) == 0 { - scrollYAML, err = runtimeService.ReadScrollFile(runtimeScroll.ScrollRoot) - if err != nil { - return nil, err - } - runtimeScroll.ScrollYAML = string(scrollYAML) - if err := store.UpdateScroll(runtimeScroll); err != nil { - return nil, err - } - } - scrollService, err := coreservices.NewCachedScrollService(runtimeScroll.ScrollRoot, scrollYAML) - if err != nil { - return nil, err - } - session := &RuntimeSession{ - store: store, - runtimeScroll: runtimeScroll, - scrollService: scrollService, - runtimeBackend: runtimeService, - } - processLauncher, err := coreservices.NewProcedureLauncherForRuntime(scrollService, runtimeService, runtimeScroll.DataRoot, runtimeScroll.ID, runtimeScroll.ScrollName, func() []domain.RuntimeRouteAssignment { - session.mu.Lock() - defer session.mu.Unlock() - routing := make([]domain.RuntimeRouteAssignment, len(session.runtimeScroll.Routing)) - copy(routing, session.runtimeScroll.Routing) - return routing - }) - if err != nil { - return nil, err - } - queueManager := coreservices.NewQueueManager(scrollService, processLauncher) - session.queueManager = queueManager - queueManager.SetStatusObserver(session.persistCommandStatus) - return session, nil -} - -func (s *RuntimeSession) Start() { - s.mu.Lock() - defer s.mu.Unlock() - if s.started { - return - } - s.started = true - go s.queueManager.Work() -} - -func (s *RuntimeSession) Hydrate() error { - s.mu.Lock() - statuses := copyCommandStatuses(s.runtimeScroll.Commands) - runtimeStatus := s.runtimeScroll.Status - s.mu.Unlock() - commands := s.scrollService.GetFile().Commands - if len(statuses) > 0 { - filtered := map[string]domain.LockStatus{} - removedStaleStatus := false - for commandName, status := range statuses { - command := commands[commandName] - if 
command == nil { - removedStaleStatus = true - continue - } - // Kubernetes keeps persistent workloads alive; do not requeue them just because - // the singleton API process restarted. - if runtimeStatus == domain.RuntimeScrollStatusRunning && status.Status == domain.ScrollLockStatusDone && command.Run == domain.RunModePersistent { - continue - } - filtered[commandName] = status - } - if removedStaleStatus { - s.mu.Lock() - for commandName := range s.runtimeScroll.Commands { - if commands[commandName] == nil { - delete(s.runtimeScroll.Commands, commandName) - } - } - err := s.store.UpdateScroll(s.runtimeScroll) - s.mu.Unlock() - if err != nil { - return err - } - } - statuses = filtered - if err := s.queueManager.HydrateCommandStatuses(statuses); err != nil { - return err - } - } - if err := s.AutoStartServe(); err != nil { - return err - } - s.mu.Lock() - s.runtimeScroll.Status = deriveRuntimeScrollStatus(s.runtimeScroll.Commands, s.scrollService.GetFile().Commands) - err := s.store.UpdateScroll(s.runtimeScroll) - s.mu.Unlock() - return err -} - -func (s *RuntimeSession) AutoStartServe() error { - serveCommand := s.scrollService.GetFile().Serve - if serveCommand == "" { - return nil - } - if err := WriteRuntimeConfig(s.runtimeScroll, s.scrollService.GetFile(), s.runtimeBackend.Name()); err != nil { - return err - } - if command := s.scrollService.GetFile().Commands[serveCommand]; command != nil && command.Run == domain.RunModePersistent { - s.mu.Lock() - status, ok := s.runtimeScroll.Commands[serveCommand] - runtimeStatus := s.runtimeScroll.Status - s.mu.Unlock() - if ok && status.Status == domain.ScrollLockStatusDone && runtimeStatus == domain.RuntimeScrollStatusRunning { - return nil - } - } - if err := s.queueManager.AddForcedItem(serveCommand); err != nil && !errors.Is(err, coreservices.ErrAlreadyInQueue) { - return err - } - return nil -} - -func (s *RuntimeSession) Run(command string) (*domain.RuntimeScroll, error) { - if err := 
WriteRuntimeConfig(s.runtimeScroll, s.scrollService.GetFile(), s.runtimeBackend.Name()); err != nil { - return nil, err - } - s.refreshCommandState() - targetCommand, err := s.scrollService.GetCommand(command) - if err != nil { - s.markError(err) - return nil, err - } - longRunning := targetCommand.Run == domain.RunModeRestart || targetCommand.Run == domain.RunModePersistent - s.rememberDoneDependencies(targetCommand, map[string]bool{}) - - if err := s.queueManager.AddTempItem(command); err != nil { - s.markError(err) - return nil, err - } - if !longRunning { - s.queueManager.WaitUntilEmpty() - } - - s.mu.Lock() - s.runtimeScroll.Status = deriveRuntimeScrollStatus(s.runtimeScroll.Commands, s.scrollService.GetFile().Commands) - err = s.store.UpdateScroll(s.runtimeScroll) - id := s.runtimeScroll.ID - s.mu.Unlock() - if err != nil { - return nil, err - } - return s.store.GetScroll(id) -} - -func (s *RuntimeSession) refreshCommandState() { - fresh, err := s.store.GetScroll(s.runtimeScroll.ID) - if err != nil { - return - } - commands := s.scrollService.GetFile().Commands - removedStaleStatus := false - for commandName := range fresh.Commands { - if commands[commandName] == nil { - delete(fresh.Commands, commandName) - removedStaleStatus = true - } - } - if removedStaleStatus { - _ = s.store.UpdateScroll(fresh) - } - s.mu.Lock() - s.runtimeScroll.Commands = copyCommandStatuses(fresh.Commands) - s.runtimeScroll.Status = fresh.Status - s.mu.Unlock() -} - -func (s *RuntimeSession) rememberDoneDependencies(command *domain.CommandInstructionSet, seen map[string]bool) { - if command == nil { - return - } - for _, dependency := range command.Needs { - if seen[dependency] { - continue - } - seen[dependency] = true - status, ok := s.runtimeScroll.Commands[dependency] - if ok && status.Status == domain.ScrollLockStatusDone { - s.queueManager.RememberDoneItem(dependency) - } - dependencyCommand, err := s.scrollService.GetCommand(dependency) - if err == nil { - 
s.rememberDoneDependencies(dependencyCommand, seen) - } - } -} - -func (s *RuntimeSession) Ports() ([]domain.RuntimePortStatus, error) { - s.mu.Lock() - runtimeScroll := *s.runtimeScroll - s.mu.Unlock() - return s.runtimeBackend.ExpectedPorts(runtimeScroll.DataRoot, s.scrollService.GetFile().Commands, s.scrollService.GetFile().Ports) -} - -func (s *RuntimeSession) RoutingTargets() ([]domain.RuntimeRoutingTarget, error) { - routingBackend, ok := s.runtimeBackend.(ports.RuntimeRoutingBackendInterface) - if !ok { - return nil, ErrRuntimeOperationUnsupported - } - s.mu.Lock() - runtimeScroll := *s.runtimeScroll - s.mu.Unlock() - return routingBackend.RoutingTargets(runtimeScroll.DataRoot, s.scrollService.GetFile().Commands, s.scrollService.GetFile().Ports) -} - -func (s *RuntimeSession) ApplyRouting(assignments []domain.RuntimeRouteAssignment) (*domain.RuntimeScroll, error) { - s.mu.Lock() - s.runtimeScroll.Routing = assignments - s.runtimeScroll.LastError = "" - err := s.store.UpdateScroll(s.runtimeScroll) - id := s.runtimeScroll.ID - s.mu.Unlock() - if err != nil { - return nil, err - } - return s.store.GetScroll(id) -} - -func (s *RuntimeSession) StopRuntime() error { - lifecycleBackend, ok := s.runtimeBackend.(ports.RuntimeLifecycleBackendInterface) - if !ok { - return ErrRuntimeOperationUnsupported - } - s.mu.Lock() - dataRoot := s.runtimeScroll.DataRoot - s.mu.Unlock() - if err := lifecycleBackend.StopRuntime(dataRoot); err != nil { - return err - } - s.mu.Lock() - s.runtimeScroll.Status = domain.RuntimeScrollStatusStopped - s.runtimeScroll.LastError = "" - err := s.store.UpdateScroll(s.runtimeScroll) - s.mu.Unlock() - return err -} - -func (s *RuntimeSession) DeleteRuntime(purgeData bool) error { - lifecycleBackend, ok := s.runtimeBackend.(ports.RuntimeLifecycleBackendInterface) - if !ok { - return ErrRuntimeOperationUnsupported - } - s.mu.Lock() - dataRoot := s.runtimeScroll.DataRoot - s.mu.Unlock() - return lifecycleBackend.DeleteRuntime(dataRoot, purgeData) 
-} - -func (s *RuntimeSession) Backup(ctx context.Context, artifact string) error { - backupBackend, ok := s.runtimeBackend.(ports.RuntimeBackupBackendInterface) - if !ok { - return ErrRuntimeOperationUnsupported - } - s.mu.Lock() - dataRoot := s.runtimeScroll.DataRoot - s.mu.Unlock() - return backupBackend.BackupRuntime(ctx, dataRoot, artifact) -} - -func (s *RuntimeSession) Restore(ctx context.Context, artifact string) error { - backupBackend, ok := s.runtimeBackend.(ports.RuntimeBackupBackendInterface) - if !ok { - return ErrRuntimeOperationUnsupported - } - s.mu.Lock() - dataRoot := s.runtimeScroll.DataRoot - s.mu.Unlock() - if err := backupBackend.RestoreRuntime(ctx, dataRoot, artifact); err != nil { - return err - } - scrollYAML, err := s.runtimeBackend.ReadScrollFile(dataRoot) - if err != nil { - return err - } - scrollService, err := coreservices.NewCachedScrollService(dataRoot, scrollYAML) - if err != nil { - return err - } - processLauncher, err := coreservices.NewProcedureLauncherForRuntime(scrollService, s.runtimeBackend, dataRoot, s.runtimeScroll.ID, s.runtimeScroll.ScrollName, func() []domain.RuntimeRouteAssignment { - s.mu.Lock() - defer s.mu.Unlock() - routing := make([]domain.RuntimeRouteAssignment, len(s.runtimeScroll.Routing)) - copy(routing, s.runtimeScroll.Routing) - return routing - }) - if err != nil { - return err - } - queueManager := coreservices.NewQueueManager(scrollService, processLauncher) - queueManager.SetStatusObserver(s.persistCommandStatus) - - s.mu.Lock() - commands := scrollService.GetFile().Commands - for commandName := range s.runtimeScroll.Commands { - if commands[commandName] == nil { - delete(s.runtimeScroll.Commands, commandName) - } - } - s.runtimeScroll.Artifact = artifact - s.runtimeScroll.ScrollRoot = dataRoot - s.runtimeScroll.ScrollYAML = string(scrollYAML) - s.runtimeScroll.Status = domain.RuntimeScrollStatusStopped - s.runtimeScroll.LastError = "" - s.scrollService = scrollService - s.queueManager = queueManager - 
if s.started { - go queueManager.Work() - } - err = s.store.UpdateScroll(s.runtimeScroll) - s.mu.Unlock() - return err -} - -func (s *RuntimeSession) Shutdown() { - s.queueManager.Shutdown() -} - -func (s *RuntimeSession) persistCommandStatus(command string, status domain.ScrollLockStatus, exitCode *int) { - s.mu.Lock() - defer s.mu.Unlock() - commands := s.scrollService.GetFile().Commands - if commands[command] == nil { - return - } - if s.runtimeScroll.Commands == nil { - s.runtimeScroll.Commands = map[string]domain.LockStatus{} - } - for commandName := range s.runtimeScroll.Commands { - if commands[commandName] == nil { - delete(s.runtimeScroll.Commands, commandName) - } - } - s.runtimeScroll.Commands[command] = domain.LockStatus{ - Status: status, - ExitCode: exitCode, - LastStatusChange: time.Now().Unix(), - } - s.runtimeScroll.Status = deriveRuntimeScrollStatus(s.runtimeScroll.Commands, s.scrollService.GetFile().Commands) - if err := s.store.UpdateScroll(s.runtimeScroll); err != nil { - logger.Log().Error("failed to persist command status", zap.String("scroll", s.runtimeScroll.ID), zap.String("command", command), zap.Error(err)) - } -} - -func (s *RuntimeSession) markError(err error) { - s.mu.Lock() - defer s.mu.Unlock() - s.runtimeScroll.Status = domain.RuntimeScrollStatusError - if err != nil { - s.runtimeScroll.LastError = err.Error() - } - _ = s.store.UpdateScroll(s.runtimeScroll) -} - -func WriteRuntimeConfig(runtimeScroll *domain.RuntimeScroll, scroll *domain.File, runtimeBackend string) error { - if strings.HasPrefix(runtimeScroll.DataRoot, "k8s://") { - return nil - } - configPath := filepath.Join(runtimeScroll.DataRoot, domain.RuntimeDataDir, domain.RuntimeConfigDir, domain.RuntimeConfigFile) - if err := os.MkdirAll(filepath.Dir(configPath), 0755); err != nil { - return err - } - config := domain.RuntimeConfig{ - SchemaVersion: "druid.runtime/v1", - Scroll: domain.RuntimeConfigScroll{ - ID: runtimeScroll.ID, - Name: runtimeScroll.ScrollName, - 
Artifact: runtimeScroll.Artifact, - }, - Paths: domain.RuntimeConfigPaths{ - Data: ".", - RuntimeConfig: filepath.ToSlash(filepath.Join(domain.RuntimeConfigDir, domain.RuntimeConfigFile)), - }, - Ports: scroll.Ports, - ExpectedPorts: runtimeExpectedPorts(scroll), - Runtime: domain.RuntimeConfigRuntime{ - Backend: runtimeBackend, - GeneratedAt: time.Now().UTC().Format(time.RFC3339Nano), - }, - } - data, err := json.MarshalIndent(config, "", " ") - if err != nil { - return err - } - return os.WriteFile(configPath, data, 0644) -} - -func runtimeExpectedPorts(scroll *domain.File) []domain.RuntimeExpectedPort { - portsByName := map[string]domain.Port{} - for _, port := range scroll.Ports { - portsByName[port.Name] = port - } - ports := []domain.RuntimeExpectedPort{} - for commandName, command := range scroll.Commands { - if command == nil { - continue - } - for idx, procedure := range command.Procedures { - if procedure == nil { - continue - } - procedureName := fmt.Sprintf("%s.%d", commandName, idx) - if procedure.Id != nil { - procedureName = *procedure.Id - } - for _, expectedPort := range procedure.ExpectedPorts { - port := portsByName[expectedPort.Name] - ports = append(ports, domain.RuntimeExpectedPort{ - Name: expectedPort.Name, - Procedure: procedureName, - Port: port.Port, - Protocol: port.Protocol, - KeepAliveTraffic: expectedPort.KeepAliveTraffic, - }) - } - } - } - return ports -} - -func deriveRuntimeScrollStatus(statuses map[string]domain.LockStatus, commands map[string]*domain.CommandInstructionSet) domain.RuntimeScrollStatus { - if len(statuses) == 0 { - return domain.RuntimeScrollStatusCreated - } - hasActive := false - hasPersistentDone := false - for commandName, status := range statuses { - if status.Status == domain.ScrollLockStatusError { - return domain.RuntimeScrollStatusError - } - if status.Status == domain.ScrollLockStatusRunning || status.Status == domain.ScrollLockStatusWaiting { - hasActive = true - } - if status.Status == 
domain.ScrollLockStatusDone { - if command := commands[commandName]; command != nil && command.Run == domain.RunModePersistent { - hasPersistentDone = true - } - } - } - if hasActive || hasPersistentDone { - return domain.RuntimeScrollStatusRunning - } - return domain.RuntimeScrollStatusStopped -} - -func copyCommandStatuses(statuses map[string]domain.LockStatus) map[string]domain.LockStatus { - copied := map[string]domain.LockStatus{} - for command, status := range statuses { - copied[command] = status - } - return copied -} diff --git a/apps/druid/core/services/runtime_controller_test.go b/apps/druid/core/services/runtime_controller_test.go deleted file mode 100644 index 07a764dc..00000000 --- a/apps/druid/core/services/runtime_controller_test.go +++ /dev/null @@ -1,404 +0,0 @@ -package services - -import ( - "os" - "path/filepath" - "testing" - - "github.com/highcard-dev/daemon/internal/core/domain" - coreservices "github.com/highcard-dev/daemon/internal/core/services" - runtimekubernetes "github.com/highcard-dev/daemon/internal/runtime/kubernetes" -) - -func TestRuntimeSessionUsesCachedScrollYAML(t *testing.T) { - scrollRoot := t.TempDir() - dataRoot := filepath.Join(t.TempDir(), "data") - runtimeScroll := &domain.RuntimeScroll{ - ID: "cached", - Artifact: "local", - ScrollRoot: scrollRoot, - DataRoot: dataRoot, - ScrollName: "cached", - ScrollYAML: `name: cached -desc: Cached scroll -version: 0.1.0 -app_version: "1.0" -serve: start -commands: - start: - procedures: - - image: alpine:3.20 - command: ["true"] -`, - } - - session, err := NewRuntimeSession(coreservices.NewRuntimeStateStore(t.TempDir()), runtimeScroll, coreservices.NewConsoleManager(coreservices.NewLogManager()), "docker") - if err != nil { - t.Fatal(err) - } - if got := session.scrollService.GetFile().Name; got != "cached" { - t.Fatalf("scroll name = %q, want cached", got) - } -} - -func TestRuntimeSessionHydrateAutoStartsServeWithoutPreviousStatus(t *testing.T) { - session := 
newRuntimeSessionForTest(t, map[string]domain.LockStatus{}, cachedScrollYAML("start")) - - if err := session.Hydrate(); err != nil { - t.Fatal(err) - } - - assertQueued(t, session, "start") -} - -func TestRuntimeSessionHydrateForceRequeuesDoneServe(t *testing.T) { - session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{ - "start": {Status: domain.ScrollLockStatusDone}, - }, cachedScrollYAML("start")) - - if err := session.Hydrate(); err != nil { - t.Fatal(err) - } - - assertQueued(t, session, "start") -} - -func TestRuntimeSessionHydrateRequeuesErrorServe(t *testing.T) { - session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{ - "start": {Status: domain.ScrollLockStatusError}, - }, cachedScrollYAML("start")) - - if err := session.Hydrate(); err != nil { - t.Fatal(err) - } - - assertQueued(t, session, "start") -} - -func TestRuntimeSessionHydrateDoesNotDuplicateActiveServe(t *testing.T) { - session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{ - "start": {Status: domain.ScrollLockStatusRunning}, - }, cachedScrollYAML("start")) - - if err := session.Hydrate(); err != nil { - t.Fatal(err) - } - - queue := session.queueManager.GetQueue() - if len(queue) != 1 { - t.Fatalf("queue len = %d, want 1: %#v", len(queue), queue) - } - if queue["start"] != domain.ScrollLockStatusWaiting { - t.Fatalf("start = %s, want waiting", queue["start"]) - } -} - -func TestRuntimeSessionHydrateSkipsMissingServe(t *testing.T) { - session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{}, cachedScrollYAML("")) - - if err := session.Hydrate(); err != nil { - t.Fatal(err) - } - - if queue := session.queueManager.GetQueue(); len(queue) != 0 { - t.Fatalf("queue = %#v, want empty", queue) - } -} - -func TestRuntimeSessionHydrateDropsStaleCommandStatus(t *testing.T) { - session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{ - "missing": {Status: domain.ScrollLockStatusDone}, - }, cachedScrollYAML("")) - - if err := 
session.Hydrate(); err != nil { - t.Fatal(err) - } - - updated, err := session.store.GetScroll(session.runtimeScroll.ID) - if err != nil { - t.Fatal(err) - } - if _, ok := updated.Commands["missing"]; ok { - t.Fatalf("stale command was not removed: %#v", updated.Commands) - } -} - -func TestRuntimeSessionHydrateDoesNotRequeueRunningPersistentServe(t *testing.T) { - session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{ - "start": {Status: domain.ScrollLockStatusDone}, - }, `name: cached -desc: Cached scroll -version: 0.1.0 -app_version: "1.0" -serve: start -commands: - start: - run: persistent - procedures: - - image: alpine:3.20 - command: ["true"] -`) - session.runtimeScroll.Status = domain.RuntimeScrollStatusRunning - if err := session.store.UpdateScroll(session.runtimeScroll); err != nil { - t.Fatal(err) - } - - if err := session.Hydrate(); err != nil { - t.Fatal(err) - } - - if queue := session.queueManager.GetQueue(); len(queue) != 0 { - t.Fatalf("queue = %#v, want empty", queue) - } -} - -func TestRuntimeSessionAutoStartsServeOnCreatePath(t *testing.T) { - session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{}, cachedScrollYAML("start")) - - if err := session.AutoStartServe(); err != nil { - t.Fatal(err) - } - - assertQueued(t, session, "start") -} - -func TestRuntimeSupervisorEnsureCanCreateWithoutStarting(t *testing.T) { - scrollRoot := t.TempDir() - dataRoot := scrollRoot - if err := os.WriteFile(filepath.Join(scrollRoot, "scroll.yaml"), []byte(cachedScrollYAML("start")), 0644); err != nil { - t.Fatal(err) - } - store := coreservices.NewRuntimeStateStore(t.TempDir()) - supervisor := NewRuntimeSupervisor( - store, - coreservices.NewRuntimeScrollManager(store), - coreservices.NewConsoleManager(coreservices.NewLogManager()), - "docker", - ) - - runtimeScroll, err := supervisor.Ensure("local", "quiet-scroll", scrollRoot, dataRoot, false) - if err != nil { - t.Fatal(err) - } - - if runtimeScroll.Status != 
domain.RuntimeScrollStatusCreated { - t.Fatalf("status = %s, want created", runtimeScroll.Status) - } - if len(runtimeScroll.Commands) != 0 { - t.Fatalf("commands = %#v, want empty", runtimeScroll.Commands) - } -} - -func TestRuntimeSupervisorCreateCanCreateWithoutStarting(t *testing.T) { - root := t.TempDir() - if err := os.WriteFile(filepath.Join(root, "scroll.yaml"), []byte(cachedScrollYAML("start")), 0644); err != nil { - t.Fatal(err) - } - store := coreservices.NewRuntimeStateStore(t.TempDir()) - supervisor := NewRuntimeSupervisor( - store, - coreservices.NewRuntimeScrollManager(store), - coreservices.NewConsoleManager(coreservices.NewLogManager()), - "docker", - ) - - runtimeScroll, err := supervisor.Create("local", "quiet-create", root, root, false) - if err != nil { - t.Fatal(err) - } - - if runtimeScroll.Status != domain.RuntimeScrollStatusCreated { - t.Fatalf("status = %s, want created", runtimeScroll.Status) - } - if len(runtimeScroll.Commands) != 0 { - t.Fatalf("commands = %#v, want empty", runtimeScroll.Commands) - } -} - -func TestRuntimeSessionApplyRoutingPersistsAssignments(t *testing.T) { - session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{}, cachedScrollYAML("")) - - updated, err := session.ApplyRouting([]domain.RuntimeRouteAssignment{{ - Name: "web-http", - PortName: "http", - Host: "scroll.example.test", - PublicPort: 443, - URL: "https://scroll.example.test", - Protocol: "https", - }}) - if err != nil { - t.Fatal(err) - } - - if len(updated.Routing) != 1 || updated.Routing[0].Host != "scroll.example.test" { - t.Fatalf("routing = %#v", updated.Routing) - } -} - -func TestDeriveRuntimeScrollStatusTreatsDonePersistentAsRunning(t *testing.T) { - status := deriveRuntimeScrollStatus(map[string]domain.LockStatus{ - "start": {Status: domain.ScrollLockStatusDone}, - }, map[string]*domain.CommandInstructionSet{ - "start": {Run: domain.RunModePersistent}, - }) - - if status != domain.RuntimeScrollStatusRunning { - t.Fatalf("status = %s, 
want running", status) - } -} - -func TestDeriveRuntimeScrollStatusTreatsDoneFiniteAsStopped(t *testing.T) { - status := deriveRuntimeScrollStatus(map[string]domain.LockStatus{ - "report": {Status: domain.ScrollLockStatusDone}, - }, map[string]*domain.CommandInstructionSet{ - "report": {Run: domain.RunModeAlways}, - }) - - if status != domain.RuntimeScrollStatusStopped { - t.Fatalf("status = %s, want stopped", status) - } -} - -func TestNewRuntimeStoreForBackendUsesKubernetesStoreWithoutStateDB(t *testing.T) { - stateDir := t.TempDir() - called := false - previous := newKubernetesRuntimeStore - newKubernetesRuntimeStore = func(config runtimekubernetes.Config) (coreservices.RuntimeScrollStore, error) { - called = true - if config.Namespace != "druid" { - t.Fatalf("namespace = %s, want druid", config.Namespace) - } - return fakeRuntimeScrollStore{state: "kubernetes:druid/configmaps"}, nil - } - t.Cleanup(func() { - newKubernetesRuntimeStore = previous - }) - - store, err := NewRuntimeStoreForBackend(stateDir, "kubernetes", runtimekubernetes.Config{Namespace: "druid"}) - if err != nil { - t.Fatal(err) - } - if !called { - t.Fatal("kubernetes store factory was not called") - } - if store.StateDir() != "kubernetes:druid/configmaps" { - t.Fatalf("StateDir = %s, want kubernetes:druid/configmaps", store.StateDir()) - } - if _, err := os.Stat(filepath.Join(stateDir, "state.db")); !os.IsNotExist(err) { - t.Fatalf("state.db stat error = %v, want not exist", err) - } -} - -func TestWriteRuntimeConfigSkipsKubernetesRefs(t *testing.T) { - workingDir := t.TempDir() - previous, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - if err := os.Chdir(workingDir); err != nil { - t.Fatal(err) - } - t.Cleanup(func() { - _ = os.Chdir(previous) - }) - - err = WriteRuntimeConfig(&domain.RuntimeScroll{ - ID: "container-lab", - Artifact: "artifact", - DataRoot: "k8s://druid/druid-container-lab-data", - ScrollName: "container-lab", - }, &domain.File{}, "kubernetes") - if err != nil { - 
t.Fatal(err) - } - if _, err := os.Stat(filepath.Join(workingDir, "k8s:")); !os.IsNotExist(err) { - t.Fatalf("k8s: stat error = %v, want not exist", err) - } -} - -func newRuntimeSessionForTest(t *testing.T, commands map[string]domain.LockStatus, scrollYAML string) *RuntimeSession { - t.Helper() - scrollRoot := t.TempDir() - dataRoot := filepath.Join(t.TempDir(), "data") - store := coreservices.NewRuntimeStateStore(t.TempDir()) - runtimeScroll := &domain.RuntimeScroll{ - ID: "cached", - Artifact: "local", - ScrollRoot: scrollRoot, - DataRoot: dataRoot, - ScrollName: "cached", - ScrollYAML: scrollYAML, - Commands: commands, - } - if err := store.CreateScroll(runtimeScroll); err != nil { - t.Fatal(err) - } - session, err := NewRuntimeSession(store, runtimeScroll, coreservices.NewConsoleManager(coreservices.NewLogManager()), "docker") - if err != nil { - t.Fatal(err) - } - return session -} - -type fakeRuntimeScrollStore struct { - state string -} - -func (f fakeRuntimeScrollStore) StateDir() string { - return f.state -} - -func (f fakeRuntimeScrollStore) ScrollRoot(id string) string { - return "" -} - -func (f fakeRuntimeScrollStore) DataRoot(id string) string { - return "" -} - -func (f fakeRuntimeScrollStore) CreateScroll(scroll *domain.RuntimeScroll) error { - return nil -} - -func (f fakeRuntimeScrollStore) ListScrolls() ([]*domain.RuntimeScroll, error) { - return nil, nil -} - -func (f fakeRuntimeScrollStore) GetScroll(id string) (*domain.RuntimeScroll, error) { - return nil, coreservices.ErrScrollNotFound -} - -func (f fakeRuntimeScrollStore) UpdateScroll(scroll *domain.RuntimeScroll) error { - return nil -} - -func (f fakeRuntimeScrollStore) DeleteScroll(id string) error { - return nil -} - -func cachedScrollYAML(serve string) string { - yaml := `name: cached -desc: Cached scroll -version: 0.1.0 -app_version: "1.0" -` - if serve != "" { - yaml += "serve: " + serve + "\n" - } - yaml += `commands: - start: - run: once - procedures: - - image: alpine:3.20 - 
command: ["true"] -` - return yaml -} - -func assertQueued(t *testing.T, session *RuntimeSession, command string) { - t.Helper() - queue := session.queueManager.GetQueue() - if queue[command] != domain.ScrollLockStatusWaiting { - t.Fatalf("%s = %s, want waiting; queue=%#v", command, queue[command], queue) - } -} diff --git a/apps/druid/core/services/runtime_dev.go b/apps/druid/core/services/runtime_dev.go new file mode 100644 index 00000000..4ce56541 --- /dev/null +++ b/apps/druid/core/services/runtime_dev.go @@ -0,0 +1,156 @@ +package services + +import ( + "context" + "fmt" + + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/ports" + "gopkg.in/yaml.v2" +) + +type DevWatchRequest struct { + WatchPaths []string `json:"watchPaths"` + HotReloadCommands []string `json:"hotReloadCommands,omitempty"` +} + +type DevWatchStatus struct { + Enabled bool `json:"enabled"` + WatchedPaths []string `json:"watchedPaths"` +} + +func (s *RuntimeSupervisor) AddCommand(id string, command string, instruction *domain.CommandInstructionSet) error { + session, err := s.sessionFor(id) + if err != nil { + return err + } + return session.AddCommand(command, instruction) +} + +func (s *RuntimeSupervisor) EnableDevWatch(id string, request DevWatchRequest) (DevWatchStatus, error) { + session, err := s.sessionFor(id) + if err != nil { + return DevWatchStatus{}, err + } + return session.EnableDevWatch(request) +} + +func (s *RuntimeSupervisor) DisableDevWatch(id string) (DevWatchStatus, error) { + session, err := s.sessionFor(id) + if err != nil { + return DevWatchStatus{}, err + } + return session.DisableDevWatch() +} + +func (s *RuntimeSupervisor) DevWatchStatus(id string) (DevWatchStatus, error) { + session, err := s.sessionFor(id) + if err != nil { + return DevWatchStatus{}, err + } + return session.DevWatchStatus(), nil +} + +func (s *RuntimeSupervisor) SubscribeDevWatch(id string) (chan *[]byte, func(), error) { + session, err := 
s.sessionFor(id) + if err != nil { + return nil, nil, err + } + ch := session.SubscribeDevWatch() + if ch == nil { + return nil, nil, fmt.Errorf("dev watch is not enabled") + } + return ch, func() { session.UnsubscribeDevWatch(ch) }, nil +} + +func (s *RuntimeSession) AddCommand(command string, instruction *domain.CommandInstructionSet) error { + if command == "" { + return fmt.Errorf("command is required") + } + if instruction == nil { + return fmt.Errorf("command instruction is required") + } + s.mu.Lock() + defer s.mu.Unlock() + file := s.scrollService.GetFile() + if file.Commands == nil { + file.Commands = map[string]*domain.CommandInstructionSet{} + } + file.Commands[command] = instruction + data, err := yaml.Marshal(file) + if err != nil { + return err + } + s.runtimeScroll.ScrollYAML = string(data) + return s.store.UpdateScroll(s.runtimeScroll) +} + +func (s *RuntimeSession) EnableDevWatch(request DevWatchRequest) (DevWatchStatus, error) { + if len(request.WatchPaths) == 0 { + request.WatchPaths = []string{"."} + } + for _, command := range request.HotReloadCommands { + if _, err := s.scrollService.GetCommand(command); err != nil { + return DevWatchStatus{}, err + } + } + s.mu.Lock() + root := s.runtimeScroll.Root + id := s.runtimeScroll.ID + routing := append([]domain.RuntimeRouteAssignment(nil), s.runtimeScroll.Routing...) + s.devWatchPaths = append([]string(nil), request.WatchPaths...) + s.devCommands = append([]string(nil), request.HotReloadCommands...) 
+ s.mu.Unlock() + + if s.devDaemonURL == "" { + return DevWatchStatus{}, fmt.Errorf("dev daemon URL is not configured") + } + if err := s.runtimeBackend.StartDev(context.Background(), ports.RuntimeDevAction{ + RuntimeID: id, + OwnerID: s.runtimeScroll.OwnerID, + RootRef: root, + MountPath: "/scroll", + Listen: ":8084", + WatchPaths: request.WatchPaths, + HotReloadCommands: request.HotReloadCommands, + Routing: routing, + DaemonURL: s.devDaemonURL, + DaemonToken: s.devDaemonToken, + AuthJWKSURL: s.devAuthJWKSURL, + RuntimeJWKSURL: s.devRuntimeJWKSURL, + }); err != nil { + return DevWatchStatus{}, err + } + return s.DevWatchStatus(), nil +} + +func (s *RuntimeSession) DisableDevWatch() (DevWatchStatus, error) { + s.mu.Lock() + root := s.runtimeScroll.Root + s.devWatchPaths = nil + s.devCommands = nil + s.mu.Unlock() + if err := s.runtimeBackend.StopDev(context.Background(), root); err != nil { + return DevWatchStatus{}, err + } + return s.DevWatchStatus(), nil +} + +func (s *RuntimeSession) DevWatchStatus() DevWatchStatus { + s.mu.Lock() + defer s.mu.Unlock() + if len(s.devWatchPaths) == 0 { + return DevWatchStatus{Enabled: false, WatchedPaths: []string{}} + } + return DevWatchStatus{Enabled: true, WatchedPaths: append([]string(nil), s.devWatchPaths...)} +} + +func (s *RuntimeSession) SubscribeDevWatch() chan *[]byte { + return nil +} + +func (s *RuntimeSession) UnsubscribeDevWatch(ch chan *[]byte) { + if s.watchService != nil { + s.watchService.Unsubscribe(ch) + } +} diff --git a/apps/druid/core/services/runtime_lifecycle.go b/apps/druid/core/services/runtime_lifecycle.go new file mode 100644 index 00000000..2d212dca --- /dev/null +++ b/apps/druid/core/services/runtime_lifecycle.go @@ -0,0 +1,56 @@ +package services + +import "github.com/highcard-dev/daemon/internal/core/domain" + +func (s *RuntimeSupervisor) Delete(id string) error { + return s.DeleteWithPolicy(id, false) +} + +func (s *RuntimeSupervisor) DeleteWithPolicy(id string, purgeData bool) error { + 
session, err := s.detachSession(id) + if err != nil { + return err + } + if err := session.DeleteRuntime(purgeData); err != nil { + return err + } + session.Shutdown() + return s.store.DeleteScroll(id) +} + +func (s *RuntimeSupervisor) StartScroll(id string) (*domain.RuntimeScroll, error) { + session, err := s.sessionFor(id) + if err != nil { + return nil, err + } + if err := session.AutoStartServe(); err != nil { + session.markError(err) + return nil, err + } + session.mu.Lock() + session.runtimeScroll.Status = deriveRuntimeScrollStatus(session.runtimeScroll.Commands, session.scrollService.GetFile().Commands) + if session.runtimeScroll.Status == domain.RuntimeScrollStatusCreated { + session.runtimeScroll.Status = domain.RuntimeScrollStatusRunning + } + session.runtimeScroll.LastError = "" + err = s.store.UpdateScroll(session.runtimeScroll) + id = session.runtimeScroll.ID + session.mu.Unlock() + if err != nil { + return nil, err + } + return s.store.GetScroll(id) +} + +func (s *RuntimeSupervisor) Stop(id string) (*domain.RuntimeScroll, error) { + session, err := s.detachSession(id) + if err != nil { + return nil, err + } + if err := session.StopRuntime(); err != nil { + session.markError(err) + return nil, err + } + session.Shutdown() + return s.store.GetScroll(id) +} diff --git a/apps/druid/core/services/runtime_materialization.go b/apps/druid/core/services/runtime_materialization.go new file mode 100644 index 00000000..25efe47d --- /dev/null +++ b/apps/druid/core/services/runtime_materialization.go @@ -0,0 +1,84 @@ +package services + +import ( + "context" + "errors" + "fmt" + "os" + "time" + + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/ports" + coreservices "github.com/highcard-dev/daemon/internal/core/services" + "github.com/highcard-dev/daemon/internal/core/services/registry" + "github.com/highcard-dev/daemon/internal/utils/logger" + "go.uber.org/zap" +) + +var ErrRuntimeMaterializationUnsupported = 
errors.New("runtime backend does not support daemon materialization") + +func (s *RuntimeSupervisor) materializeNewScroll(ctx context.Context, runtimeService ports.RuntimeBackendInterface, artifact string, name string, registryCredentials []domain.RegistryCredential) (*ports.RuntimeMaterialization, error) { + id := coreservices.RuntimeScrollIDFromName(name) + if id == "" { + return nil, ErrRuntimeMaterializationUnsupported + } + return s.runPullWorker(ctx, runtimeService, ports.RuntimeWorkerModeCreate, id, artifact, s.store.Root(id), registryCredentials) +} + +func (s *RuntimeSupervisor) runPullWorker(ctx context.Context, runtimeService ports.RuntimeBackendInterface, mode ports.RuntimeWorkerMode, runtimeID string, artifact string, root string, registryCredentials []domain.RegistryCredential) (*ports.RuntimeMaterialization, error) { + token, resultCh, err := s.workerCallbacks.Register(runtimeID) + if err != nil { + return nil, err + } + callbackURL := s.workerCallbackURL + "/internal/v1/workers/" + runtimeID + "/complete" + action := ports.RuntimeWorkerAction{ + Mode: mode, + RuntimeID: runtimeID, + Artifact: artifact, + RootRef: root, + MountPath: "/scroll", + CallbackURL: callbackURL, + CallbackToken: token, + RegistryCredentials: registryCredentials, + } + if err := runtimeService.SpawnPullWorker(ctx, action); err != nil { + s.workerCallbacks.Cancel(runtimeID) + return nil, err + } + waitCtx, cancel := context.WithTimeout(ctx, 20*time.Minute) + defer cancel() + select { + case result, ok := <-resultCh: + if !ok { + return nil, fmt.Errorf("worker callback closed before result") + } + if result.Error != "" { + return nil, errors.New(result.Error) + } + return &ports.RuntimeMaterialization{ + Artifact: artifact, + ArtifactDigest: result.ArtifactDigest, + Root: root, + ScrollYAML: []byte(result.ScrollYAML), + }, nil + case <-waitCtx.Done(): + s.workerCallbacks.Cancel(runtimeID) + return nil, fmt.Errorf("worker action for runtime %s timed out: %w", runtimeID, 
waitCtx.Err()) + } +} + +func resolveArtifactDigest(artifact string, registryCredentials []domain.RegistryCredential) string { + if artifact == "" { + return "" + } + if _, err := os.Stat(artifact); err == nil { + return "" + } + oci := registry.NewOciClient(registry.NewCredentialStore(registryCredentials)) + digest, err := oci.ResolveDigest(artifact) + if err != nil { + logger.Log().Warn("Unable to resolve artifact digest", zap.String("artifact", artifact), zap.Error(err)) + return "" + } + return digest +} diff --git a/apps/druid/core/services/runtime_session.go b/apps/druid/core/services/runtime_session.go new file mode 100644 index 00000000..344c4ff1 --- /dev/null +++ b/apps/druid/core/services/runtime_session.go @@ -0,0 +1,93 @@ +package services + +import ( + "fmt" + "sync" + + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/ports" + coreservices "github.com/highcard-dev/daemon/internal/core/services" +) + +// RuntimeSession is the live execution view for one persisted scroll. It owns +// the command queue and cached scroll.yaml; storage and containers stay behind +// the runtime backend. 
+type RuntimeSession struct { + store coreservices.RuntimeScrollStore + runtimeScroll *domain.RuntimeScroll + scrollService *coreservices.ScrollService + queueManager *coreservices.QueueManager + watchService ports.WatchServiceInterface + runtimeBackend ports.RuntimeBackendInterface + procedures ports.ProcedureLauchnerInterface + devWatchPaths []string + devCommands []string + devDaemonURL string + devDaemonToken string + devAuthJWKSURL string + devRuntimeJWKSURL string + + mu sync.Mutex + started bool +} + +func NewRuntimeSession( + store coreservices.RuntimeScrollStore, + runtimeScroll *domain.RuntimeScroll, + runtimeService ports.RuntimeBackendInterface, +) (*RuntimeSession, error) { + if runtimeScroll.Root == "" { + return nil, fmt.Errorf("runtime scroll %s has no root", runtimeScroll.ID) + } + scrollYAML := []byte(runtimeScroll.ScrollYAML) + if len(scrollYAML) == 0 { + var err error + scrollYAML, err = runtimeService.ReadScrollFile(runtimeScroll.Root) + if err != nil { + return nil, err + } + runtimeScroll.ScrollYAML = string(scrollYAML) + if err := store.UpdateScroll(runtimeScroll); err != nil { + return nil, err + } + } + scrollService, err := coreservices.NewCachedScrollService(runtimeScroll.Root, scrollYAML) + if err != nil { + return nil, err + } + session := &RuntimeSession{ + store: store, + runtimeScroll: runtimeScroll, + scrollService: scrollService, + runtimeBackend: runtimeService, + } + processLauncher, err := coreservices.NewProcedureLauncherForRuntime(scrollService, runtimeService, runtimeScroll.Root, runtimeScroll.ID, runtimeScroll.ScrollName, func() []domain.RuntimeRouteAssignment { + session.mu.Lock() + defer session.mu.Unlock() + routing := make([]domain.RuntimeRouteAssignment, len(session.runtimeScroll.Routing)) + copy(routing, session.runtimeScroll.Routing) + return routing + }) + if err != nil { + return nil, err + } + queueManager := coreservices.NewQueueManager(scrollService, processLauncher) + session.queueManager = queueManager + 
session.procedures = processLauncher + queueManager.SetStatusObserver(session.persistCommandStatus) + return session, nil +} + +func (s *RuntimeSession) Start() { + s.mu.Lock() + defer s.mu.Unlock() + if s.started { + return + } + s.started = true + go s.queueManager.Work() +} + +func (s *RuntimeSession) Shutdown() { + s.queueManager.Shutdown() +} diff --git a/apps/druid/core/services/runtime_session_cache.go b/apps/druid/core/services/runtime_session_cache.go new file mode 100644 index 00000000..710e2412 --- /dev/null +++ b/apps/druid/core/services/runtime_session_cache.go @@ -0,0 +1,78 @@ +package services + +import ( + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/utils/logger" + "go.uber.org/zap" +) + +func (s *RuntimeSupervisor) detachSession(id string) (*RuntimeSession, error) { + s.mu.Lock() + session := s.sessions[id] + delete(s.sessions, id) + s.mu.Unlock() + if session != nil { + return session, nil + } + session, err := s.sessionFor(id) + if err != nil { + return nil, err + } + s.mu.Lock() + delete(s.sessions, id) + s.mu.Unlock() + return session, nil +} + +func (s *RuntimeSupervisor) sessionFor(id string) (*RuntimeSession, error) { + s.mu.Lock() + session := s.sessions[id] + s.mu.Unlock() + if session != nil { + return session, nil + } + runtimeScroll, err := s.store.GetScroll(id) + if err != nil { + return nil, err + } + return s.startSession(runtimeScroll) +} + +func (s *RuntimeSupervisor) startSession(runtimeScroll *domain.RuntimeScroll) (*RuntimeSession, error) { + s.mu.Lock() + if session := s.sessions[runtimeScroll.ID]; session != nil { + s.mu.Unlock() + return session, nil + } + s.mu.Unlock() + + session, err := NewRuntimeSession(s.store, runtimeScroll, s.runtimeBackend) + if err != nil { + return nil, err + } + session.devDaemonURL = s.workerDaemonURL + session.devDaemonToken = s.internalToken + session.devAuthJWKSURL = s.authJWKSURL + session.devRuntimeJWKSURL = s.runtimeJWKSURL + 
session.Start() + + s.mu.Lock() + if existing := s.sessions[runtimeScroll.ID]; existing != nil { + s.mu.Unlock() + session.Shutdown() + return existing, nil + } + s.sessions[runtimeScroll.ID] = session + s.mu.Unlock() + return session, nil +} + +func (s *RuntimeSupervisor) markScrollError(runtimeScroll *domain.RuntimeScroll, err error) { + logger.Log().Error("failed to restore runtime scroll", zap.String("scroll", runtimeScroll.ID), zap.Error(err)) + runtimeScroll.Status = domain.RuntimeScrollStatusError + runtimeScroll.LastError = err.Error() + if runtimeScroll.Commands == nil { + runtimeScroll.Commands = map[string]domain.LockStatus{} + } + _ = s.store.UpdateScroll(runtimeScroll) +} diff --git a/apps/druid/core/services/runtime_session_commands.go b/apps/druid/core/services/runtime_session_commands.go new file mode 100644 index 00000000..fae3c7a0 --- /dev/null +++ b/apps/druid/core/services/runtime_session_commands.go @@ -0,0 +1,188 @@ +package services + +import ( + "errors" + "time" + + "github.com/highcard-dev/daemon/internal/core/domain" + coreservices "github.com/highcard-dev/daemon/internal/core/services" + "github.com/highcard-dev/daemon/internal/utils/logger" + "go.uber.org/zap" +) + +func (s *RuntimeSession) Hydrate() error { + s.mu.Lock() + statuses := copyCommandStatuses(s.runtimeScroll.Commands) + runtimeStatus := s.runtimeScroll.Status + s.mu.Unlock() + commands := s.scrollService.GetFile().Commands + if len(statuses) > 0 { + filtered := map[string]domain.LockStatus{} + removedStaleStatus := false + for commandName, status := range statuses { + command := commands[commandName] + if command == nil { + removedStaleStatus = true + continue + } + // Kubernetes keeps persistent workloads alive; do not requeue them just because + // the singleton API process restarted. 
+ if runtimeStatus == domain.RuntimeScrollStatusRunning && status.Status == domain.ScrollLockStatusDone && command.Run == domain.RunModePersistent { + continue + } + filtered[commandName] = status + } + if removedStaleStatus { + s.mu.Lock() + for commandName := range s.runtimeScroll.Commands { + if commands[commandName] == nil { + delete(s.runtimeScroll.Commands, commandName) + } + } + err := s.store.UpdateScroll(s.runtimeScroll) + s.mu.Unlock() + if err != nil { + return err + } + } + statuses = filtered + if err := s.queueManager.HydrateCommandStatuses(statuses); err != nil { + return err + } + } + if err := s.AutoStartServe(); err != nil { + return err + } + s.mu.Lock() + s.runtimeScroll.Status = deriveRuntimeScrollStatus(s.runtimeScroll.Commands, s.scrollService.GetFile().Commands) + err := s.store.UpdateScroll(s.runtimeScroll) + s.mu.Unlock() + return err +} + +func (s *RuntimeSession) AutoStartServe() error { + serveCommand := s.scrollService.GetFile().Serve + if serveCommand == "" { + return nil + } + if command := s.scrollService.GetFile().Commands[serveCommand]; command != nil && command.Run == domain.RunModePersistent { + s.mu.Lock() + status, ok := s.runtimeScroll.Commands[serveCommand] + runtimeStatus := s.runtimeScroll.Status + s.mu.Unlock() + if ok && status.Status == domain.ScrollLockStatusDone && runtimeStatus == domain.RuntimeScrollStatusRunning { + return nil + } + } + if err := s.queueManager.AddForcedItem(serveCommand); err != nil && !errors.Is(err, coreservices.ErrAlreadyInQueue) { + return err + } + return nil +} + +func (s *RuntimeSession) Run(command string) (*domain.RuntimeScroll, error) { + s.refreshCommandState() + targetCommand, err := s.scrollService.GetCommand(command) + if err != nil { + s.markError(err) + return nil, err + } + longRunning := targetCommand.Run == domain.RunModeRestart || targetCommand.Run == domain.RunModePersistent + s.rememberDoneDependencies(targetCommand, map[string]bool{}) + + if err := 
s.queueManager.AddTempItem(command); err != nil { + s.markError(err) + return nil, err + } + if !longRunning { + s.queueManager.WaitUntilEmpty() + } + + s.mu.Lock() + s.runtimeScroll.Status = deriveRuntimeScrollStatus(s.runtimeScroll.Commands, s.scrollService.GetFile().Commands) + err = s.store.UpdateScroll(s.runtimeScroll) + id := s.runtimeScroll.ID + s.mu.Unlock() + if err != nil { + return nil, err + } + return s.store.GetScroll(id) +} + +func (s *RuntimeSession) refreshCommandState() { + fresh, err := s.store.GetScroll(s.runtimeScroll.ID) + if err != nil { + return + } + commands := s.scrollService.GetFile().Commands + removedStaleStatus := false + for commandName := range fresh.Commands { + if commands[commandName] == nil { + delete(fresh.Commands, commandName) + removedStaleStatus = true + } + } + if removedStaleStatus { + _ = s.store.UpdateScroll(fresh) + } + s.mu.Lock() + s.runtimeScroll.Commands = copyCommandStatuses(fresh.Commands) + s.runtimeScroll.Status = fresh.Status + s.mu.Unlock() +} + +func (s *RuntimeSession) rememberDoneDependencies(command *domain.CommandInstructionSet, seen map[string]bool) { + if command == nil { + return + } + for _, dependency := range command.Needs { + if seen[dependency] { + continue + } + seen[dependency] = true + status, ok := s.runtimeScroll.Commands[dependency] + if ok && status.Status == domain.ScrollLockStatusDone { + s.queueManager.RememberDoneItem(dependency) + } + dependencyCommand, err := s.scrollService.GetCommand(dependency) + if err == nil { + s.rememberDoneDependencies(dependencyCommand, seen) + } + } +} + +func (s *RuntimeSession) persistCommandStatus(command string, status domain.ScrollLockStatus, exitCode *int) { + s.mu.Lock() + defer s.mu.Unlock() + commands := s.scrollService.GetFile().Commands + if commands[command] == nil { + return + } + if s.runtimeScroll.Commands == nil { + s.runtimeScroll.Commands = map[string]domain.LockStatus{} + } + for commandName := range s.runtimeScroll.Commands { + if 
commands[commandName] == nil { + delete(s.runtimeScroll.Commands, commandName) + } + } + s.runtimeScroll.Commands[command] = domain.LockStatus{ + Status: status, + ExitCode: exitCode, + LastStatusChange: time.Now().Unix(), + } + s.runtimeScroll.Status = deriveRuntimeScrollStatus(s.runtimeScroll.Commands, s.scrollService.GetFile().Commands) + if err := s.store.UpdateScroll(s.runtimeScroll); err != nil { + logger.Log().Error("failed to persist command status", zap.String("scroll", s.runtimeScroll.ID), zap.String("command", command), zap.Error(err)) + } +} + +func (s *RuntimeSession) markError(err error) { + s.mu.Lock() + defer s.mu.Unlock() + s.runtimeScroll.Status = domain.RuntimeScrollStatusError + if err != nil { + s.runtimeScroll.LastError = err.Error() + } + _ = s.store.UpdateScroll(s.runtimeScroll) +} diff --git a/apps/druid/core/services/runtime_session_runtime.go b/apps/druid/core/services/runtime_session_runtime.go new file mode 100644 index 00000000..587d1c52 --- /dev/null +++ b/apps/druid/core/services/runtime_session_runtime.go @@ -0,0 +1,122 @@ +package services + +import ( + "context" + + "github.com/highcard-dev/daemon/internal/core/domain" + coreservices "github.com/highcard-dev/daemon/internal/core/services" +) + +func (s *RuntimeSession) Ports() ([]domain.RuntimePortStatus, error) { + s.mu.Lock() + runtimeScroll := *s.runtimeScroll + s.mu.Unlock() + return s.runtimeBackend.ExpectedPorts(runtimeScroll.Root, s.scrollService.GetFile().Commands, s.scrollService.GetFile().Ports) +} + +func (s *RuntimeSession) RoutingTargets() ([]domain.RuntimeRoutingTarget, error) { + s.mu.Lock() + runtimeScroll := *s.runtimeScroll + s.mu.Unlock() + return s.runtimeBackend.RoutingTargets(runtimeScroll.Root, s.scrollService.GetFile().Commands, s.scrollService.GetFile().Ports) +} + +func (s *RuntimeSession) Procedures() map[string]domain.ScrollLockStatus { + return s.procedures.GetProcedureStatuses() +} + +func (s *RuntimeSession) ApplyRouting(assignments 
[]domain.RuntimeRouteAssignment) (*domain.RuntimeScroll, error) { + s.mu.Lock() + s.runtimeScroll.Routing = assignments + s.runtimeScroll.LastError = "" + err := s.store.UpdateScroll(s.runtimeScroll) + id := s.runtimeScroll.ID + s.mu.Unlock() + if err != nil { + return nil, err + } + return s.store.GetScroll(id) +} + +func (s *RuntimeSession) StopRuntime() error { + s.mu.Lock() + root := s.runtimeScroll.Root + s.mu.Unlock() + if err := s.runtimeBackend.StopRuntime(root); err != nil { + return err + } + s.mu.Lock() + s.runtimeScroll.Status = domain.RuntimeScrollStatusStopped + s.runtimeScroll.LastError = "" + err := s.store.UpdateScroll(s.runtimeScroll) + s.mu.Unlock() + return err +} + +func (s *RuntimeSession) DeleteRuntime(purgeData bool) error { + s.mu.Lock() + root := s.runtimeScroll.Root + s.mu.Unlock() + return s.runtimeBackend.DeleteRuntime(root, purgeData) +} + +func (s *RuntimeSession) Backup(ctx context.Context, artifact string, registryCredentials []domain.RegistryCredential) error { + s.mu.Lock() + root := s.runtimeScroll.Root + s.mu.Unlock() + return s.runtimeBackend.BackupRuntime(ctx, root, artifact, registryCredentials) +} + +func (s *RuntimeSession) Restore(ctx context.Context, artifact string, registryCredentials []domain.RegistryCredential) error { + s.mu.Lock() + root := s.runtimeScroll.Root + s.mu.Unlock() + if err := s.runtimeBackend.RestoreRuntime(ctx, root, artifact, registryCredentials); err != nil { + return err + } + scrollYAML, err := s.runtimeBackend.ReadScrollFile(root) + if err != nil { + return err + } + scrollService, err := coreservices.NewCachedScrollService(root, scrollYAML) + if err != nil { + return err + } + processLauncher, err := coreservices.NewProcedureLauncherForRuntime(scrollService, s.runtimeBackend, root, s.runtimeScroll.ID, s.runtimeScroll.ScrollName, func() []domain.RuntimeRouteAssignment { + s.mu.Lock() + defer s.mu.Unlock() + routing := make([]domain.RuntimeRouteAssignment, len(s.runtimeScroll.Routing)) + 
copy(routing, s.runtimeScroll.Routing) + return routing + }) + if err != nil { + return err + } + queueManager := coreservices.NewQueueManager(scrollService, processLauncher) + queueManager.SetStatusObserver(s.persistCommandStatus) + + s.mu.Lock() + oldQueue := s.queueManager + commands := scrollService.GetFile().Commands + for commandName := range s.runtimeScroll.Commands { + if commands[commandName] == nil { + delete(s.runtimeScroll.Commands, commandName) + } + } + s.runtimeScroll.Artifact = artifact + s.runtimeScroll.Root = root + s.runtimeScroll.ScrollYAML = string(scrollYAML) + s.runtimeScroll.Status = domain.RuntimeScrollStatusStopped + s.runtimeScroll.LastError = "" + s.scrollService = scrollService + s.queueManager = queueManager + s.procedures = processLauncher + started := s.started + err = s.store.UpdateScroll(s.runtimeScroll) + s.mu.Unlock() + if err == nil && started { + oldQueue.Shutdown() + go queueManager.Work() + } + return err +} diff --git a/apps/druid/core/services/runtime_status.go b/apps/druid/core/services/runtime_status.go new file mode 100644 index 00000000..bbca73ff --- /dev/null +++ b/apps/druid/core/services/runtime_status.go @@ -0,0 +1,36 @@ +package services + +import "github.com/highcard-dev/daemon/internal/core/domain" + +func deriveRuntimeScrollStatus(statuses map[string]domain.LockStatus, commands map[string]*domain.CommandInstructionSet) domain.RuntimeScrollStatus { + if len(statuses) == 0 { + return domain.RuntimeScrollStatusCreated + } + hasActive := false + hasPersistentDone := false + for commandName, status := range statuses { + if status.Status == domain.ScrollLockStatusError { + return domain.RuntimeScrollStatusError + } + if status.Status == domain.ScrollLockStatusRunning || status.Status == domain.ScrollLockStatusWaiting { + hasActive = true + } + if status.Status == domain.ScrollLockStatusDone { + if command := commands[commandName]; command != nil && command.Run == domain.RunModePersistent { + hasPersistentDone = true + 
} + } + } + if hasActive || hasPersistentDone { + return domain.RuntimeScrollStatusRunning + } + return domain.RuntimeScrollStatusStopped +} + +func copyCommandStatuses(statuses map[string]domain.LockStatus) map[string]domain.LockStatus { + copied := map[string]domain.LockStatus{} + for command, status := range statuses { + copied[command] = status + } + return copied +} diff --git a/apps/druid/core/services/runtime_supervisor.go b/apps/druid/core/services/runtime_supervisor.go new file mode 100644 index 00000000..41f7384c --- /dev/null +++ b/apps/druid/core/services/runtime_supervisor.go @@ -0,0 +1,224 @@ +package services + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/ports" + coreservices "github.com/highcard-dev/daemon/internal/core/services" +) + +// RuntimeSupervisor is the daemon-facing coordinator. It owns persisted runtime +// truth and session lifetimes; Docker/Kubernetes resource details stay behind +// the runtime backend. 
type RuntimeSupervisor struct {
	store             coreservices.RuntimeScrollStore
	manager           *coreservices.RuntimeScrollManager
	runtimeBackend    ports.RuntimeBackendInterface
	workerCallbacks   *WorkerCallbackManager
	workerCallbackURL string
	// Dev-worker wiring propagated to every session on startSession.
	workerDaemonURL string
	internalToken   string
	authJWKSURL     string
	runtimeJWKSURL  string

	// mu guards sessions.
	mu       sync.Mutex
	sessions map[string]*RuntimeSession
}

// NewRuntimeSupervisor builds a supervisor with an empty session cache.
func NewRuntimeSupervisor(
	store coreservices.RuntimeScrollStore,
	manager *coreservices.RuntimeScrollManager,
	runtimeBackend ports.RuntimeBackendInterface,
) *RuntimeSupervisor {
	return &RuntimeSupervisor{
		store:          store,
		manager:        manager,
		runtimeBackend: runtimeBackend,
		sessions:       map[string]*RuntimeSession{},
	}
}

// SetWorkerCallbacks configures worker completion callbacks; callbackURL is
// normalized without a trailing slash.
func (s *RuntimeSupervisor) SetWorkerCallbacks(callbacks *WorkerCallbackManager, callbackURL string) {
	s.workerCallbacks = callbacks
	s.workerCallbackURL = strings.TrimRight(callbackURL, "/")
}

// SetDevWorkerConfig stores the dev-mode daemon endpoint and token material
// handed to sessions when they are started.
func (s *RuntimeSupervisor) SetDevWorkerConfig(daemonURL string, internalToken string, authJWKSURL string, runtimeJWKSURL string) {
	s.workerDaemonURL = strings.TrimRight(daemonURL, "/")
	s.internalToken = internalToken
	s.authJWKSURL = authJWKSURL
	s.runtimeJWKSURL = runtimeJWKSURL
}

// Start restores all persisted, non-deleted scrolls: each gets a started and
// hydrated session. Per-scroll failures are recorded on the scroll and do not
// abort restoration of the others.
func (s *RuntimeSupervisor) Start() error {
	scrolls, err := s.store.ListScrolls()
	if err != nil {
		return err
	}
	for _, runtimeScroll := range scrolls {
		if runtimeScroll.Status == domain.RuntimeScrollStatusDeleted {
			continue
		}
		session, err := s.startSession(runtimeScroll)
		if err != nil {
			s.markScrollError(runtimeScroll, err)
			continue
		}
		if err := session.Hydrate(); err != nil {
			s.markScrollError(runtimeScroll, err)
			continue
		}
	}
	return nil
}

// Create creates a new runtime scroll without an owner.
func (s *RuntimeSupervisor) Create(artifact string, name string, registryCredentials []domain.RegistryCredential) (*domain.RuntimeScroll, error) {
	return s.CreateWithOwner(artifact, name, "", registryCredentials)
}

// CreateWithOwner creates a runtime scroll: when the name maps to a stable ID
// a placeholder record is persisted first (so materialization failures are
// remembered), then the artifact is materialized via the backend and the
// record is filled in. Fails if a scroll with the same ID already exists.
func (s *RuntimeSupervisor) CreateWithOwner(artifact string, name string, ownerID string, registryCredentials []domain.RegistryCredential) (*domain.RuntimeScroll, error) {
	id := coreservices.RuntimeScrollIDFromName(name)
	var placeholder *domain.RuntimeScroll
	if id != "" {
		if _, err := s.store.GetScroll(id); err == nil {
			return nil, fmt.Errorf("%w: %s", coreservices.ErrScrollAlreadyExists, id)
		} else if !errors.Is(err, coreservices.ErrScrollNotFound) {
			return nil, err
		}
		placeholder = &domain.RuntimeScroll{
			ID:       id,
			OwnerID:  ownerID,
			Artifact: artifact,
			Root:     s.store.Root(id),
			Status:   domain.RuntimeScrollStatusCreated,
			Commands: map[string]domain.LockStatus{},
		}
		if err := s.store.CreateScroll(placeholder); err != nil {
			return nil, err
		}
	}
	// markPlaceholderError best-effort records a failure on the placeholder so
	// later Ensure calls do not re-trigger materialization.
	markPlaceholderError := func(cause error) {
		if placeholder == nil {
			return
		}
		placeholder.Status = domain.RuntimeScrollStatusError
		placeholder.LastError = cause.Error()
		_ = s.store.UpdateScroll(placeholder)
	}

	materialized, err := s.materializeNewScroll(context.Background(), s.runtimeBackend, artifact, name, registryCredentials)
	if err != nil {
		markPlaceholderError(err)
		return nil, err
	}
	if materialized.Artifact != "" {
		artifact = materialized.Artifact
	}
	if placeholder != nil {
		scroll, err := domain.NewScrollFromBytes(materialized.Root, materialized.ScrollYAML)
		if err != nil {
			markPlaceholderError(err)
			return nil, err
		}
		if err := scroll.Validate(false); err != nil {
			markPlaceholderError(err)
			return nil, err
		}
		placeholder.Artifact = artifact
		placeholder.ArtifactDigest = materialized.ArtifactDigest
		placeholder.Root = materialized.Root
		placeholder.ScrollName = scroll.Name
		placeholder.ScrollYAML = string(materialized.ScrollYAML)
		placeholder.Status = domain.RuntimeScrollStatusCreated
		placeholder.LastError = ""
		placeholder.Commands = map[string]domain.LockStatus{}
		if err := s.store.UpdateScroll(placeholder); err != nil {
			return nil, err
		}
		return placeholder, nil
	}
	runtimeScroll, err := s.manager.CreateWithDigest(artifact, materialized.ArtifactDigest, name, ownerID, materialized.Root, materialized.ScrollYAML)
	if err != nil {
		return nil, err
	}
	return runtimeScroll, nil
}

// Ensure is EnsureWithOwner without an owner.
func (s *RuntimeSupervisor) Ensure(artifact string, name string, registryCredentials []domain.RegistryCredential) (*domain.RuntimeScroll, error) {
	return s.EnsureWithOwner(artifact, name, "", registryCredentials)
}

// EnsureWithOwner reconciles name toward artifact: it returns the existing
// scroll (back-filling the owner), updates it when the artifact or its digest
// changed, remembers prior materialization failures instead of retrying them,
// and creates the scroll when it does not exist yet.
func (s *RuntimeSupervisor) EnsureWithOwner(artifact string, name string, ownerID string, registryCredentials []domain.RegistryCredential) (*domain.RuntimeScroll, error) {
	id := coreservices.RuntimeScrollIDFromName(name)
	if id != "" {
		runtimeScroll, err := s.store.GetScroll(id)
		if err == nil {
			// A failed first materialization has no scroll.yaml yet. Returning it
			// stops an Active CR from spawning one pull worker per reconcile.
			if runtimeScroll.ScrollYAML == "" {
				if ownerID != "" && runtimeScroll.OwnerID != ownerID {
					runtimeScroll.OwnerID = ownerID
					if err := s.store.UpdateScroll(runtimeScroll); err != nil {
						return nil, err
					}
				}
				return runtimeScroll, nil
			}
			// An errored scroll with an unchanged artifact is not retried.
			if runtimeScroll.Status == domain.RuntimeScrollStatusError && (artifact == "" || artifact == runtimeScroll.Artifact) {
				if ownerID != "" && runtimeScroll.OwnerID != ownerID {
					runtimeScroll.OwnerID = ownerID
					if err := s.store.UpdateScroll(runtimeScroll); err != nil {
						return nil, err
					}
				}
				return runtimeScroll, nil
			}
			if artifact != "" {
				nextDigest := resolveArtifactDigest(artifact, registryCredentials)
				artifactChanged := artifact != runtimeScroll.Artifact
				digestChanged := nextDigest != "" && nextDigest != runtimeScroll.ArtifactDigest
				if artifactChanged || digestChanged {
					updated, err := s.updateExistingScroll(runtimeScroll, artifact, nextDigest, registryCredentials)
					if err != nil {
						return nil, err
					}
					if ownerID != "" && updated.OwnerID != ownerID {
						updated.OwnerID = ownerID
						if err := s.store.UpdateScroll(updated); err != nil {
							return nil, err
						}
					}
					return updated, nil
				}
			}
			if ownerID != "" && runtimeScroll.OwnerID != ownerID {
				runtimeScroll.OwnerID = ownerID
				if err := s.store.UpdateScroll(runtimeScroll); err != nil {
					return nil, err
				}
			}
			return runtimeScroll, nil
		}
		if !errors.Is(err, coreservices.ErrScrollNotFound) {
			return nil, err
		}
	}
	runtimeScroll, err := s.CreateWithOwner(artifact, name, ownerID, registryCredentials)
	if err != nil {
		return nil, err
	}
	return runtimeScroll, nil
}

// List returns every persisted runtime scroll.
func (s *RuntimeSupervisor) List() ([]*domain.RuntimeScroll, error) {
	return s.store.ListScrolls()
}

// Get returns the persisted runtime scroll with the given id.
func (s *RuntimeSupervisor) Get(id string) (*domain.RuntimeScroll, error) {
	return s.store.GetScroll(id)
}
diff --git a/apps/druid/core/services/runtime_supervisor_test.go b/apps/druid/core/services/runtime_supervisor_test.go
new file mode 100644
index 00000000..992ecfd3
--- /dev/null
+++ b/apps/druid/core/services/runtime_supervisor_test.go
@@ -0,0 +1,616 @@
package services

import (
	"context"
	"errors"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/highcard-dev/daemon/internal/core/domain"
	"github.com/highcard-dev/daemon/internal/core/ports"
	coreservices "github.com/highcard-dev/daemon/internal/core/services"
)

func TestRuntimeSessionUsesCachedScrollYAML(t *testing.T) {
	root := t.TempDir()
	runtimeScroll := &domain.RuntimeScroll{
		ID:         "cached",
		Artifact:   "local",
		Root:       root,
		ScrollName: "cached",
		ScrollYAML: `name: cached
desc: Cached scroll
version: 0.1.0
app_version: "1.0"
serve: start
commands:
  start:
    procedures:
      - image: alpine:3.20
        command: ["true"]
`,
	}

	session, err := NewRuntimeSession(coreservices.NewRuntimeStateStore(t.TempDir()), runtimeScroll, &fakeWorkerBackend{})
	if err != nil {
		t.Fatal(err)
	}
	if got := session.scrollService.GetFile().Name; got != "cached" {
		t.Fatalf("scroll name = %q, want cached", got)
	}
}

func TestRuntimeSessionHydrateAutoStartsServeWithoutPreviousStatus(t *testing.T) {
	session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{}, cachedScrollYAML("start"))

	if err := session.Hydrate(); err != nil {
		t.Fatal(err)
	}

	assertQueued(t, session, "start")
}

func TestRuntimeSessionHydrateForceRequeuesDoneServe(t *testing.T) {
	session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{
		"start": {Status: domain.ScrollLockStatusDone},
	}, cachedScrollYAML("start"))

	if err := session.Hydrate(); err != nil {
		t.Fatal(err)
	}

	assertQueued(t, session, "start")
}

func TestRuntimeSessionHydrateRequeuesErrorServe(t *testing.T) {
	session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{
		"start": {Status: domain.ScrollLockStatusError},
	}, cachedScrollYAML("start"))

	if err := session.Hydrate(); err != nil {
		t.Fatal(err)
	}

	assertQueued(t, session, "start")
}

func TestRuntimeSessionHydrateDoesNotDuplicateActiveServe(t *testing.T) {
	session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{
		"start": {Status: domain.ScrollLockStatusRunning},
	}, cachedScrollYAML("start"))

	if err := session.Hydrate(); err != nil {
		t.Fatal(err)
	}

	queue := session.queueManager.GetQueue()
	if len(queue) != 1 {
		t.Fatalf("queue len = %d, want 1: %#v", len(queue), queue)
	}
	if queue["start"] != domain.ScrollLockStatusWaiting {
		t.Fatalf("start = %s, want waiting", queue["start"])
	}
}

func TestRuntimeSessionHydrateSkipsMissingServe(t *testing.T) {
	session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{}, cachedScrollYAML(""))

	if err := session.Hydrate(); err != nil {
		t.Fatal(err)
	}

	if queue := session.queueManager.GetQueue(); len(queue) != 0 {
		t.Fatalf("queue = %#v, want empty", queue)
	}
}

func TestRuntimeSessionHydrateDropsStaleCommandStatus(t *testing.T) {
	session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{
		"missing": {Status: domain.ScrollLockStatusDone},
	}, cachedScrollYAML(""))

	if err := session.Hydrate(); err != nil {
		t.Fatal(err)
	}

	updated, err := session.store.GetScroll(session.runtimeScroll.ID)
	if err != nil {
		t.Fatal(err)
	}
	if _, ok := updated.Commands["missing"]; ok {
		t.Fatalf("stale command was not removed: %#v", updated.Commands)
	}
}

func TestRuntimeSessionHydrateDoesNotRequeueRunningPersistentServe(t *testing.T) {
	session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{
		"start": {Status: domain.ScrollLockStatusDone},
	}, `name: cached
desc: Cached scroll
version: 0.1.0
app_version: "1.0"
serve: start
commands:
  start:
    run: persistent
    procedures:
      - image: alpine:3.20
        command: ["true"]
`)
	session.runtimeScroll.Status = domain.RuntimeScrollStatusRunning
	if err := session.store.UpdateScroll(session.runtimeScroll); err != nil {
		t.Fatal(err)
	}

	if err := session.Hydrate(); err != nil {
		t.Fatal(err)
	}

	if queue := session.queueManager.GetQueue(); len(queue) != 0 {
		t.Fatalf("queue = %#v, want empty", queue)
	}
}

func TestRuntimeSessionAutoStartsServeOnCreatePath(t *testing.T) {
	session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{}, cachedScrollYAML("start"))

	if err := session.AutoStartServe(); err != nil {
		t.Fatal(err)
	}

	assertQueued(t, session, "start")
}

func TestRuntimeSupervisorEnsureCanCreate(t *testing.T) {
	artifact := t.TempDir()
	if err := os.WriteFile(filepath.Join(artifact, "scroll.yaml"), []byte(cachedScrollYAML("start")), 0644); err != nil {
		t.Fatal(err)
	}
	store := coreservices.NewRuntimeStateStore(t.TempDir())
	callbacks := NewWorkerCallbackManager()
	supervisor := NewRuntimeSupervisor(
		store,
		coreservices.NewRuntimeScrollManager(store),
		&fakeWorkerBackend{callbacks: callbacks, scrollYAML: cachedScrollYAML("start")},
	)
	supervisor.SetWorkerCallbacks(callbacks, "http://druid-cli:8083")

	runtimeScroll, err := supervisor.Ensure(artifact, "quiet-scroll", nil)
	if err != nil {
		t.Fatal(err)
	}

	if runtimeScroll.Status != domain.RuntimeScrollStatusCreated {
		t.Fatalf("status = %s, want created", runtimeScroll.Status)
	}
	if len(runtimeScroll.Commands) != 0 {
		t.Fatalf("commands = %#v, want empty", runtimeScroll.Commands)
	}
}

func TestRuntimeSupervisorCreateCanCreate(t *testing.T) {
	artifact := t.TempDir()
	if err := os.WriteFile(filepath.Join(artifact, "scroll.yaml"), []byte(cachedScrollYAML("start")), 0644); err != nil {
		t.Fatal(err)
	}
	store := coreservices.NewRuntimeStateStore(t.TempDir())
	callbacks := NewWorkerCallbackManager()
	supervisor := NewRuntimeSupervisor(
		store,
		coreservices.NewRuntimeScrollManager(store),
		&fakeWorkerBackend{callbacks: callbacks, scrollYAML: cachedScrollYAML("start")},
	)
	supervisor.SetWorkerCallbacks(callbacks, "http://druid-cli:8083")

	runtimeScroll, err := supervisor.Create(artifact, "quiet-create", nil)
	if err != nil {
		t.Fatal(err)
	}

	if runtimeScroll.Status != domain.RuntimeScrollStatusCreated {
		t.Fatalf("status = %s, want created", runtimeScroll.Status)
	}
	if len(runtimeScroll.Commands) != 0 {
		t.Fatalf("commands = %#v, want empty", runtimeScroll.Commands)
	}
}

func TestRuntimeSupervisorCreateUsesPullWorkerBeforeStateMutation(t *testing.T) {
	store := coreservices.NewRuntimeStateStore(t.TempDir())
	callbacks := NewWorkerCallbackManager()
	backend := &fakeWorkerBackend{callbacks: callbacks, scrollYAML: cachedScrollYAML("start"), digest: "sha256:worker"}
	supervisor := NewRuntimeSupervisor(
		store,
		coreservices.NewRuntimeScrollManager(store),
		backend,
	)
	supervisor.SetWorkerCallbacks(callbacks, "http://druid-cli:8083")

	runtimeScroll, err := supervisor.Create("registry.local/lab:1.0", "worker-scroll", nil)
	if err != nil {
		t.Fatal(err)
	}
	if backend.action.Mode != ports.RuntimeWorkerModeCreate || backend.action.RuntimeID != "worker-scroll" {
		t.Fatalf("worker action = %#v", backend.action)
	}
	if backend.action.RootRef != store.Root("worker-scroll") || backend.action.MountPath != "/scroll" {
		t.Fatalf("worker root = %#v, want %s mounted at /scroll", backend.action, store.Root("worker-scroll"))
	}
	if backend.action.CallbackToken == "" || !strings.Contains(backend.action.CallbackURL, "/internal/v1/workers/worker-scroll/complete") {
		t.Fatalf("callback action = %#v", backend.action)
	}
	if runtimeScroll.ArtifactDigest != "sha256:worker" {
		t.Fatalf("artifact digest = %s, want sha256:worker", runtimeScroll.ArtifactDigest)
	}
	if runtimeScroll.Root != store.Root("worker-scroll") {
		t.Fatalf("root = %s, want %s", runtimeScroll.Root, store.Root("worker-scroll"))
	}
}

func TestRuntimeSupervisorEnsureMaterializationFailureIsRemembered(t *testing.T) {
	store := coreservices.NewRuntimeStateStore(t.TempDir())
	callbacks := NewWorkerCallbackManager()
	backend := &fakeWorkerBackend{callbacks: callbacks, workerErr: errors.New("pull image failed")}
	supervisor := NewRuntimeSupervisor(
		store,
		coreservices.NewRuntimeScrollManager(store),
		backend,
	)
	supervisor.SetWorkerCallbacks(callbacks, "http://druid-cli:8083")

	if _, err := supervisor.Ensure("registry.local/missing:1.0", "broken-scroll", nil); err == nil {
		t.Fatal("Ensure error = nil, want materialization error")
	}
	failed, err := store.GetScroll("broken-scroll")
	if err != nil {
		t.Fatal(err)
	}
	if failed.Status != domain.RuntimeScrollStatusError || !strings.Contains(failed.LastError, "pull image failed") {
		t.Fatalf("failed scroll = %#v", failed)
	}

	runtimeScroll, err := supervisor.Ensure("registry.local/missing:1.0", "broken-scroll", nil)
	if err != nil {
		t.Fatalf("second Ensure error = %v, want remembered runtime scroll", err)
	}
	if runtimeScroll.Status != domain.RuntimeScrollStatusError || backend.spawnCount != 1 {
		t.Fatalf("runtimeScroll=%#v spawnCount=%d, want remembered error and no respawn", runtimeScroll, backend.spawnCount)
	}
}

func TestRuntimeSupervisorEnsureDoesNotRetryExistingError(t *testing.T) {
	store := coreservices.NewRuntimeStateStore(t.TempDir())
	existing := &domain.RuntimeScroll{
		ID:         "invalid-scroll",
		Artifact:   "registry.local/invalid:1.0",
		Root:       store.Root("invalid-scroll"),
		ScrollName: "invalid-scroll",
		ScrollYAML: cachedScrollYAML("start"),
		Status:     domain.RuntimeScrollStatusError,
		LastError:  "procedure field mode is unsupported",
		Commands:   map[string]domain.LockStatus{},
	}
	if err := store.CreateScroll(existing); err != nil {
		t.Fatal(err)
	}
	callbacks := NewWorkerCallbackManager()
	backend := &fakeWorkerBackend{callbacks: callbacks, scrollYAML: updatedScrollYAML("invalid-scroll")}
	supervisor := NewRuntimeSupervisor(
		store,
		coreservices.NewRuntimeScrollManager(store),
		backend,
	)
	supervisor.SetWorkerCallbacks(callbacks, "http://druid-cli:8083")

	runtimeScroll, err := supervisor.Ensure(existing.Artifact, existing.ID, nil)
	if err != nil {
		t.Fatal(err)
	}
	if runtimeScroll.Status != domain.RuntimeScrollStatusError || backend.spawnCount != 0 {
		t.Fatalf("runtimeScroll=%#v spawnCount=%d, want existing error and no worker", runtimeScroll, backend.spawnCount)
	}
}

func TestRuntimeSupervisorEnsureUpdatesChangedArtifact(t *testing.T) {
	store := coreservices.NewRuntimeStateStore(t.TempDir())
	root := "k8s://druid/druid-update-scroll-data"
	existing := &domain.RuntimeScroll{
		ID:         "update-scroll",
		Artifact:   "registry.local/lab:1.0",
		Root:       root,
		ScrollName: "old-scroll",
		ScrollYAML: cachedScrollYAML("start"),
		Status:     domain.RuntimeScrollStatusRunning,
		Commands: map[string]domain.LockStatus{
			"start": {Status: domain.ScrollLockStatusDone},
		},
		Routing: []domain.RuntimeRouteAssignment{{Name: "old-http", Host: "old.example.test"}},
	}
	if err := store.CreateScroll(existing); err != nil {
		t.Fatal(err)
	}
	callbacks := NewWorkerCallbackManager()
	backend := &fakeWorkerBackend{callbacks: callbacks, scrollYAML: updatedScrollYAML("updated-scroll")}
	supervisor := NewRuntimeSupervisor(
		store,
		coreservices.NewRuntimeScrollManager(store),
		backend,
	)
	supervisor.SetWorkerCallbacks(callbacks, "http://druid-cli:8083")

	updated, err := supervisor.Ensure("registry.local/lab:2.0", "update-scroll", []domain.RegistryCredential{{Host: "registry.local", Username: "bot"}})
	if err != nil {
		t.Fatal(err)
	}

	if backend.stopRoot != root {
		t.Fatalf("stop root = %s, want %s", backend.stopRoot, root)
	}
	if backend.action.Mode != ports.RuntimeWorkerModeUpdate || backend.action.Artifact != "registry.local/lab:2.0" || backend.action.RootRef != root {
		t.Fatalf("worker action = %#v", backend.action)
	}
	if updated.Artifact != "registry.local/lab:2.0" || updated.ScrollName != "updated-scroll" {
		t.Fatalf("updated scroll = %#v", updated)
	}
	if updated.Status != domain.RuntimeScrollStatusStopped {
		t.Fatalf("status = %s, want stopped", updated.Status)
	}
	if len(updated.Commands) != 0 {
		t.Fatalf("commands = %#v, want cleared", updated.Commands)
	}
	if len(updated.Routing) != 0 {
		t.Fatalf("routing = %#v, want cleared", updated.Routing)
	}
	if !strings.Contains(updated.ScrollYAML, "updated-scroll") {
		t.Fatalf("scroll yaml = %q", updated.ScrollYAML)
	}
}

func TestRuntimeSupervisorUpdateUsesPullWorkerWhenAvailable(t *testing.T) {
	store := coreservices.NewRuntimeStateStore(t.TempDir())
	root := "k8s://druid/druid-update-worker-data"
	existing := &domain.RuntimeScroll{
		ID:         "update-worker",
		Artifact:   "registry.local/lab:1.0",
		Root:       root,
		ScrollName: "old-scroll",
		ScrollYAML: cachedScrollYAML("start"),
		Status:     domain.RuntimeScrollStatusStopped,
		Commands:   map[string]domain.LockStatus{},
	}
	if err := store.CreateScroll(existing); err != nil {
		t.Fatal(err)
	}
	callbacks := NewWorkerCallbackManager()
	backend := &fakeWorkerBackend{callbacks: callbacks, scrollYAML: updatedScrollYAML("updated-worker"), digest: "sha256:updated"}
	supervisor := NewRuntimeSupervisor(
		store,
		coreservices.NewRuntimeScrollManager(store),
		backend,
	)
	supervisor.SetWorkerCallbacks(callbacks, "http://druid-cli:8083")

	updated, err := supervisor.Ensure("registry.local/lab:2.0", "update-worker", nil)
	if err != nil {
		t.Fatal(err)
	}
	if backend.action.Mode != ports.RuntimeWorkerModeUpdate || backend.action.RootRef != root {
		t.Fatalf("worker action = %#v", backend.action)
	}
	if updated.Artifact != "registry.local/lab:2.0" || updated.ArtifactDigest != "sha256:updated" || updated.ScrollName != "updated-worker" {
		t.Fatalf("updated scroll = %#v", updated)
	}
}

func TestRuntimeSessionApplyRoutingPersistsAssignments(t *testing.T) {
	session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{}, cachedScrollYAML(""))

	updated, err := session.ApplyRouting([]domain.RuntimeRouteAssignment{{
		Name:       "web-http",
		PortName:   "http",
		Host:       "scroll.example.test",
		PublicPort: 443,
		URL:        "https://scroll.example.test",
		Protocol:   "https",
	}})
	if err != nil {
		t.Fatal(err)
	}

	if len(updated.Routing) != 1 || updated.Routing[0].Host != "scroll.example.test" {
		t.Fatalf("routing = %#v", updated.Routing)
	}
}

func TestRuntimeSessionProceduresUsesLauncherStatus(t *testing.T) {
	session := newRuntimeSessionForTest(t, map[string]domain.LockStatus{}, cachedScrollYAML(""))
	session.queueManager.RememberDoneItem("start")
	session.procedures = fakeProcedureStatuses{statuses: map[string]domain.ScrollLockStatus{
		"start.0": domain.ScrollLockStatusRunning,
	}}

	got := session.Procedures()
	if got["start.0"] != domain.ScrollLockStatusRunning {
		t.Fatalf("procedures = %#v", got)
	}
	if _, ok := got["start"]; ok {
		t.Fatalf("procedures leaked queue status: %#v", got)
	}
}

func TestDeriveRuntimeScrollStatusTreatsDonePersistentAsRunning(t *testing.T) {
	status := deriveRuntimeScrollStatus(map[string]domain.LockStatus{
		"start": {Status: domain.ScrollLockStatusDone},
	}, map[string]*domain.CommandInstructionSet{
		"start": {Run: domain.RunModePersistent},
	})

	if status != domain.RuntimeScrollStatusRunning {
		t.Fatalf("status = %s, want running", status)
	}
}

func TestDeriveRuntimeScrollStatusTreatsDoneFiniteAsStopped(t *testing.T) {
	status := deriveRuntimeScrollStatus(map[string]domain.LockStatus{
		"report": {Status: domain.ScrollLockStatusDone},
	}, map[string]*domain.CommandInstructionSet{
		"report": {Run: domain.RunModeAlways},
	})

	if status != domain.RuntimeScrollStatusStopped {
		t.Fatalf("status = %s, want stopped", status)
	}
}

// newRuntimeSessionForTest persists a scroll with the given command statuses
// and YAML, then builds a session over a fake backend.
func newRuntimeSessionForTest(t *testing.T, commands map[string]domain.LockStatus, scrollYAML string) *RuntimeSession {
	t.Helper()
	root := t.TempDir()
	store := coreservices.NewRuntimeStateStore(t.TempDir())
	runtimeScroll := &domain.RuntimeScroll{
		ID:         "cached",
		Artifact:   "local",
		Root:       root,
		ScrollName: "cached",
		ScrollYAML: scrollYAML,
		Commands:   commands,
	}
	if err := store.CreateScroll(runtimeScroll); err != nil {
		t.Fatal(err)
	}
	session, err := NewRuntimeSession(store, runtimeScroll, &fakeWorkerBackend{})
	if err != nil {
		t.Fatal(err)
	}
	return session
}

// fakeWorkerBackend is a recording stub for ports.RuntimeBackendInterface used
// by the supervisor/session tests above.
type fakeWorkerBackend struct {
	callbacks  *WorkerCallbackManager
	scrollYAML string
	digest     string
	workerErr  error
	action     ports.RuntimeWorkerAction
	stopRoot   string
	spawnCount int
}

func (f *fakeWorkerBackend) Name() string {
	return "fake-worker"
}

func (f *fakeWorkerBackend) ReadScrollFile(root string) ([]byte, error) {
	return []byte(f.scrollYAML), nil
}

func (f *fakeWorkerBackend) RunCommand(command ports.RuntimeCommand) (*int, error) {
	return nil, nil
}

func (f *fakeWorkerBackend) ExpectedPorts(root string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimePortStatus, error) {
	return nil, nil
}
+ +func (f *fakeWorkerBackend) RoutingTargets(root string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimeRoutingTarget, error) { + return nil, nil +} + +func (f *fakeWorkerBackend) StartDev(ctx context.Context, action ports.RuntimeDevAction) error { + return nil +} + +func (f *fakeWorkerBackend) StopDev(ctx context.Context, root string) error { return nil } + +func (f *fakeWorkerBackend) Attach(commandName string, data string) error { + return nil +} + +func (f *fakeWorkerBackend) Signal(commandName string, target string, signal string, root string) error { + return nil +} + +func (f *fakeWorkerBackend) StopRuntime(root string) error { + f.stopRoot = root + return nil +} + +func (f *fakeWorkerBackend) DeleteRuntime(root string, purgeData bool) error { + return nil +} + +func (f *fakeWorkerBackend) BackupRuntime(ctx context.Context, root string, artifact string, registryCredentials []domain.RegistryCredential) error { + return nil +} + +func (f *fakeWorkerBackend) RestoreRuntime(ctx context.Context, root string, artifact string, registryCredentials []domain.RegistryCredential) error { + return nil +} + +func (f *fakeWorkerBackend) SpawnPullWorker(ctx context.Context, action ports.RuntimeWorkerAction) error { + f.action = action + f.spawnCount++ + if f.workerErr != nil { + return f.workerErr + } + if f.callbacks == nil { + return nil + } + return f.callbacks.Complete(action.RuntimeID, action.CallbackToken, ports.RuntimeWorkerResult{ + ScrollYAML: f.scrollYAML, + ArtifactDigest: f.digest, + }) +} + +func cachedScrollYAML(serve string) string { + yaml := `name: cached +desc: Cached scroll +version: 0.1.0 +app_version: "1.0" +` + if serve != "" { + yaml += "serve: " + serve + "\n" + } + yaml += `commands: + start: + run: once + procedures: + - image: alpine:3.20 + command: ["true"] +` + return yaml +} + +func updatedScrollYAML(name string) string { + return `name: ` + name + ` +desc: Updated scroll +version: 0.2.0 
+app_version: "2.0" +serve: start +commands: + start: + procedures: + - image: alpine:3.20 + command: ["true"] +` +} + +func assertQueued(t *testing.T, session *RuntimeSession, command string) { + t.Helper() + queue := session.queueManager.GetQueue() + if queue[command] != domain.ScrollLockStatusWaiting { + t.Fatalf("%s = %s, want waiting; queue=%#v", command, queue[command], queue) + } +} + +type fakeProcedureStatuses struct { + statuses map[string]domain.ScrollLockStatus +} + +func (f fakeProcedureStatuses) Run(string) error { + return nil +} + +func (f fakeProcedureStatuses) GetProcedureStatuses() map[string]domain.ScrollLockStatus { + return f.statuses +} diff --git a/apps/druid/core/services/runtime_update.go b/apps/druid/core/services/runtime_update.go new file mode 100644 index 00000000..2e8d8a08 --- /dev/null +++ b/apps/druid/core/services/runtime_update.go @@ -0,0 +1,73 @@ +package services + +import ( + "context" + "errors" + + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/ports" +) + +func (s *RuntimeSupervisor) updateExistingScroll(runtimeScroll *domain.RuntimeScroll, artifact string, knownDigest string, registryCredentials []domain.RegistryCredential) (*domain.RuntimeScroll, error) { + s.mu.Lock() + session := s.sessions[runtimeScroll.ID] + delete(s.sessions, runtimeScroll.ID) + s.mu.Unlock() + if session != nil { + session.Shutdown() + } + + if runtimeScroll.Status == domain.RuntimeScrollStatusRunning { + if err := s.runtimeBackend.StopRuntime(runtimeScroll.Root); err != nil { + runtimeScroll.Status = domain.RuntimeScrollStatusError + runtimeScroll.LastError = err.Error() + _ = s.store.UpdateScroll(runtimeScroll) + return nil, err + } + } + + if s.workerCallbacks == nil || s.workerCallbackURL == "" { + err := errors.New("worker callback URL is required for daemon update") + runtimeScroll.Status = domain.RuntimeScrollStatusError + runtimeScroll.LastError = err.Error() + _ = 
s.store.UpdateScroll(runtimeScroll) + return nil, err + } + materialized, err := s.runPullWorker(context.Background(), s.runtimeBackend, ports.RuntimeWorkerModeUpdate, runtimeScroll.ID, artifact, runtimeScroll.Root, registryCredentials) + if err != nil { + runtimeScroll.Status = domain.RuntimeScrollStatusError + runtimeScroll.LastError = err.Error() + _ = s.store.UpdateScroll(runtimeScroll) + return nil, err + } + scroll, err := domain.NewScrollFromBytes(materialized.Root, materialized.ScrollYAML) + if err != nil { + return nil, err + } + if err := scroll.Validate(false); err != nil { + return nil, err + } + runtimeScroll.Artifact = materialized.Artifact + if runtimeScroll.Artifact == "" { + runtimeScroll.Artifact = artifact + } + runtimeScroll.ArtifactDigest = materialized.ArtifactDigest + if runtimeScroll.ArtifactDigest == "" { + runtimeScroll.ArtifactDigest = knownDigest + } + runtimeScroll.Root = materialized.Root + runtimeScroll.ScrollName = scroll.Name + runtimeScroll.ScrollYAML = string(materialized.ScrollYAML) + runtimeScroll.Commands = map[string]domain.LockStatus{} + runtimeScroll.Routing = nil + runtimeScroll.LastError = "" + if runtimeScroll.Status == domain.RuntimeScrollStatusRunning || runtimeScroll.Status == domain.RuntimeScrollStatusStopped { + runtimeScroll.Status = domain.RuntimeScrollStatusStopped + } else { + runtimeScroll.Status = domain.RuntimeScrollStatusCreated + } + if err := s.store.UpdateScroll(runtimeScroll); err != nil { + return nil, err + } + return s.store.GetScroll(runtimeScroll.ID) +} diff --git a/apps/druid/core/services/worker_callbacks.go b/apps/druid/core/services/worker_callbacks.go new file mode 100644 index 00000000..69a27004 --- /dev/null +++ b/apps/druid/core/services/worker_callbacks.go @@ -0,0 +1,65 @@ +package services + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "sync" + + "github.com/highcard-dev/daemon/internal/core/ports" +) + +type WorkerCallbackManager struct { + mu sync.Mutex + actions 
map[string]workerCallbackAction +} + +type workerCallbackAction struct { + token string + result chan ports.RuntimeWorkerResult +} + +func NewWorkerCallbackManager() *WorkerCallbackManager { + return &WorkerCallbackManager{actions: map[string]workerCallbackAction{}} +} + +func (m *WorkerCallbackManager) Register(runtimeID string) (string, <-chan ports.RuntimeWorkerResult, error) { + tokenBytes := make([]byte, 32) + if _, err := rand.Read(tokenBytes); err != nil { + return "", nil, err + } + token := hex.EncodeToString(tokenBytes) + ch := make(chan ports.RuntimeWorkerResult, 1) + m.mu.Lock() + if _, ok := m.actions[runtimeID]; ok { + m.mu.Unlock() + return "", nil, fmt.Errorf("worker action already pending for runtime %s", runtimeID) + } + m.actions[runtimeID] = workerCallbackAction{token: token, result: ch} + m.mu.Unlock() + return token, ch, nil +} + +func (m *WorkerCallbackManager) Cancel(runtimeID string) { + m.mu.Lock() + delete(m.actions, runtimeID) + m.mu.Unlock() +} + +func (m *WorkerCallbackManager) Complete(runtimeID string, token string, result ports.RuntimeWorkerResult) error { + m.mu.Lock() + action, ok := m.actions[runtimeID] + if !ok { + m.mu.Unlock() + return fmt.Errorf("unknown or completed worker action") + } + if token == "" || token != action.token { + m.mu.Unlock() + return fmt.Errorf("invalid worker token") + } + delete(m.actions, runtimeID) + m.mu.Unlock() + action.result <- result + close(action.result) + return nil +} diff --git a/apps/druid/core/services/worker_callbacks_test.go b/apps/druid/core/services/worker_callbacks_test.go new file mode 100644 index 00000000..2511b9ee --- /dev/null +++ b/apps/druid/core/services/worker_callbacks_test.go @@ -0,0 +1,55 @@ +package services + +import ( + "testing" + "time" + + "github.com/highcard-dev/daemon/internal/core/ports" +) + +func TestWorkerCallbackValidatesTokenAndRejectsReplay(t *testing.T) { + manager := NewWorkerCallbackManager() + token, resultCh, err := manager.Register("scroll-a") + if 
err != nil { + t.Fatal(err) + } + result := ports.RuntimeWorkerResult{ScrollYAML: "name: scroll-a\n"} + if err := manager.Complete("scroll-a", "wrong-token", result); err == nil { + t.Fatal("invalid token should fail") + } + if err := manager.Complete("scroll-a", token, result); err != nil { + t.Fatal(err) + } + select { + case got := <-resultCh: + if got.ScrollYAML != result.ScrollYAML { + t.Fatalf("result = %#v", got) + } + case <-time.After(time.Second): + t.Fatal("callback result was not delivered") + } + if err := manager.Complete("scroll-a", token, result); err == nil { + t.Fatal("replayed callback should fail") + } +} + +func TestWorkerCallbackRejectsDuplicatePendingRuntime(t *testing.T) { + manager := NewWorkerCallbackManager() + if _, _, err := manager.Register("scroll-a"); err != nil { + t.Fatal(err) + } + if _, _, err := manager.Register("scroll-a"); err == nil { + t.Fatal("duplicate pending action should fail") + } +} + +func TestWorkerCallbackRejectsUnknownRuntime(t *testing.T) { + manager := NewWorkerCallbackManager() + token, _, err := manager.Register("scroll-a") + if err != nil { + t.Fatal(err) + } + if err := manager.Complete("scroll-b", token, ports.RuntimeWorkerResult{}); err == nil { + t.Fatal("unknown runtime should fail") + } +} diff --git a/config/helm-charts/druid-cli/chart_test.go b/config/helm-charts/druid-cli/chart_test.go index 843670e6..ea9111a2 100644 --- a/config/helm-charts/druid-cli/chart_test.go +++ b/config/helm-charts/druid-cli/chart_test.go @@ -20,9 +20,14 @@ func TestChartRendersDefaultAndCustomValues(t *testing.T) { "--runtime=kubernetes", "--listen=:8081", "--public-listen=:8082", + "--worker-callback-listen=:8083", + "--worker-daemon-url=http://druid-cli:8081", + "--public-jwks-url=http://druid-cli:8082/.well-known/jwks.json", "name: management", "name: public", + "name: worker", "DRUID_K8S_PULL_IMAGE", + `resources: ["secrets"]`, "hubble-relay.kube-system.svc.cluster.local:80", } { if !strings.Contains(defaultManifest, 
want) { @@ -35,11 +40,13 @@ func TestChartRendersDefaultAndCustomValues(t *testing.T) { customManifest := helmTemplate(t, "--set", "auth.enabled=true", + "--set", "auth.jwksUrl=https://auth.example.test/.well-known/jwks.json", + "--set", "auth.publicJwksUrl=https://runtime.example.test/.well-known/jwks.json", "--set", "auth.existingSecret=druid-runtime-token", "--set", "runtime.namespaces.mode=all", "--set", "runtime.storageClass=local-path", "--set", "runtime.registryPlainHTTP=true", - "--set", "runtime.pullImage=registry.local/druid-client:e2e", + "--set", "runtime.pullImage=registry.local/druid-cli:e2e", "--set", "runtime.helperImage=busybox:1.36", "--set", "runtime.kubeconfigSecret.name=druid-kubeconfig", "--set", "hubble.relayAddr=hubble.example:80", @@ -53,7 +60,9 @@ func TestChartRendersDefaultAndCustomValues(t *testing.T) { "kind: ClusterRoleBinding", "name: DRUID_INTERNAL_TOKEN", "name: \"druid-runtime-token\"", - "value: \"registry.local/druid-client:e2e\"", + "--auth-jwks-url=https://auth.example.test/.well-known/jwks.json", + "--public-jwks-url=https://runtime.example.test/.well-known/jwks.json", + "value: \"registry.local/druid-cli:e2e\"", "value: \"busybox:1.36\"", "value: \"true\"", "value: /etc/druid/kubeconfig", diff --git a/config/helm-charts/druid-cli/templates/deployment.yaml b/config/helm-charts/druid-cli/templates/deployment.yaml index 978c4fec..2daf2306 100644 --- a/config/helm-charts/druid-cli/templates/deployment.yaml +++ b/config/helm-charts/druid-cli/templates/deployment.yaml @@ -33,9 +33,16 @@ spec: image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} args: - - serve + - daemon - --listen=:{{ .Values.service.managementPort }} - --public-listen=:{{ .Values.service.publicPort }} + - --worker-callback-listen=:{{ .Values.service.workerCallbackPort }} + - --worker-callback-url=http://{{ include "druid-cli.fullname" . 
}}:{{ .Values.service.workerCallbackPort }} + - --worker-daemon-url={{ default (printf "http://%s:%v" (include "druid-cli.fullname" .) .Values.service.managementPort) .Values.runtime.workerDaemonUrl }} + - --public-jwks-url={{ default (printf "http://%s:%v/.well-known/jwks.json" (include "druid-cli.fullname" .) .Values.service.publicPort) .Values.auth.publicJwksUrl }} + {{- if .Values.auth.jwksUrl }} + - --auth-jwks-url={{ .Values.auth.jwksUrl }} + {{- end }} - --runtime={{ .Values.runtime.backend }} - --state-dir={{ .Values.runtime.stateDir }} ports: @@ -45,6 +52,9 @@ spec: - name: public containerPort: {{ .Values.service.publicPort }} protocol: TCP + - name: worker + containerPort: {{ .Values.service.workerCallbackPort }} + protocol: TCP env: - name: DRUID_K8S_NAMESPACE value: {{ default .Release.Namespace .Values.runtime.namespaces.single | quote }} @@ -56,7 +66,7 @@ spec: value: {{ .Values.runtime.helperImage | quote }} - name: DRUID_K8S_REGISTRY_SECRET value: {{ .Values.runtime.registrySecret | quote }} - - name: DRUID_K8S_REGISTRY_PLAIN_HTTP + - name: DRUID_REGISTRY_PLAIN_HTTP value: {{ ternary "true" "false" .Values.runtime.registryPlainHTTP | quote }} - name: DRUID_HUBBLE_RELAY_ADDR value: {{ .Values.hubble.relayAddr | quote }} diff --git a/config/helm-charts/druid-cli/templates/networkpolicy.yaml b/config/helm-charts/druid-cli/templates/networkpolicy.yaml index e426f0f7..412231ab 100644 --- a/config/helm-charts/druid-cli/templates/networkpolicy.yaml +++ b/config/helm-charts/druid-cli/templates/networkpolicy.yaml @@ -22,4 +22,6 @@ spec: port: {{ .Values.service.managementPort }} - protocol: TCP port: {{ .Values.service.publicPort }} + - protocol: TCP + port: {{ .Values.service.workerCallbackPort }} {{- end }} diff --git a/config/helm-charts/druid-cli/templates/rbac.yaml b/config/helm-charts/druid-cli/templates/rbac.yaml index 30b5cf2a..9c92589d 100644 --- a/config/helm-charts/druid-cli/templates/rbac.yaml +++ 
b/config/helm-charts/druid-cli/templates/rbac.yaml @@ -9,6 +9,9 @@ rules: - apiGroups: [""] resources: ["configmaps"] verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "create", "update", "patch", "delete"] - apiGroups: [""] resources: ["persistentvolumeclaims", "services", "pods"] verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] diff --git a/config/helm-charts/druid-cli/templates/service.yaml b/config/helm-charts/druid-cli/templates/service.yaml index 216056a3..3515c812 100644 --- a/config/helm-charts/druid-cli/templates/service.yaml +++ b/config/helm-charts/druid-cli/templates/service.yaml @@ -15,5 +15,9 @@ spec: targetPort: public protocol: TCP name: public + - port: {{ .Values.service.workerCallbackPort }} + targetPort: worker + protocol: TCP + name: worker selector: {{- include "druid-cli.selectorLabels" . | nindent 4 }} diff --git a/config/helm-charts/druid-cli/values.yaml b/config/helm-charts/druid-cli/values.yaml index cb0af8eb..6d794bef 100644 --- a/config/helm-charts/druid-cli/values.yaml +++ b/config/helm-charts/druid-cli/values.yaml @@ -34,6 +34,7 @@ service: type: ClusterIP managementPort: 8081 publicPort: 8082 + workerCallbackPort: 8083 ingress: enabled: false @@ -49,11 +50,12 @@ ingress: runtime: backend: kubernetes stateDir: /var/lib/druid/runtime + workerDaemonUrl: "" namespaces: mode: single single: "" storageClass: "" - pullImage: ghcr.io/highcard-dev/druid-client:dev + pullImage: ghcr.io/highcard-dev/druid:dev helperImage: alpine:3.20 registrySecret: "" registryPlainHTTP: false @@ -66,6 +68,8 @@ hubble: auth: enabled: false + jwksUrl: "" + publicJwksUrl: "" existingSecret: "" tokenKey: token diff --git a/docs_md/main.go b/docs_md/main.go index bc2b0fec..91e0aa77 100644 --- a/docs_md/main.go +++ b/docs_md/main.go @@ -9,7 +9,6 @@ import ( "regexp" "strings" - clientcli "github.com/highcard-dev/daemon/apps/druid-client/adapters/cli" 
coldstartercli "github.com/highcard-dev/daemon/apps/druid-coldstarter/adapters/cli" druidcli "github.com/highcard-dev/daemon/apps/druid/adapters/cli" "github.com/spf13/cobra" @@ -60,7 +59,6 @@ func main() { roots := []*cobra.Command{ druidcli.RootCmd, - clientcli.NewRootCommand(), coldstartercli.NewRootCommand(), } for _, root := range roots { diff --git a/examples/README.md b/examples/README.md index 99c8d651..edce2824 100644 --- a/examples/README.md +++ b/examples/README.md @@ -14,19 +14,19 @@ Each example declares the container paths it needs with `mounts`. Mounts are sou - `jobs`: finite job-only pipeline that prepares data, transforms it, reports output, and exits. - `container-lab`: container-only integration example with setup jobs, persistent web/cache services, ports, mounts, env, smoke checks, reports, and signal cleanup. -Use `druid serve --runtime docker` for container execution. The daemon listens on a Unix socket, and `druid-client` connects to that socket with `--daemon-socket`. The client owns OCI work: `druid-client pull` downloads artifacts, while `druid-client create [name]` materializes a scroll and registers it with the daemon. For already checked-out examples, use `druid-client register [dir]` and omit `[name]` so ids are derived from each example's `scroll.yaml`. Run commands with `druid-client run ` and inspect state with `druid-client describe `. +Use `druid serve --runtime docker` for container execution. The daemon listens on a Unix socket, and `druid` connects to that socket with `--daemon-socket`. `druid pull` downloads artifacts, while `druid create [name]` materializes a scroll and registers it with the daemon. For already checked-out examples, pass the local directory to `druid create`. Run commands with `druid run ` and inspect state with `druid describe `. Runtime procedures use `image`, `command`, `working_dir`, `env`, `ports`, `mounts`, `signal`, and `tty` directly on each procedure. 
-The coldstart gate is a normal command that runs the standalone `druid-coldstarter` binary/image. Build the local image with `make build-coldstarter-image` before running the Minecraft example. Custom coldstart handlers belong under `data/coldstart/` inside the canonical scroll volume. +The coldstart gate is a normal command that runs `druid-coldstarter` from the same runtime image as other Druid workers. It is configured only through env, with `DRUID_ROOT` pointing at the mounted runtime root. Custom coldstart handlers belong in the scroll root, for example `packet_handler/minecraft.lua`. The `container-lab` example intentionally avoids coldstarter so it can be used as a broad runtime smoke test for Docker and Kubernetes: ```bash -druid-client register examples/container-lab -druid-client describe container-lab -druid-client ports container-lab -druid-client run container-lab verify -druid-client run container-lab report -druid-client run container-lab stop +druid create examples/container-lab container-lab +druid describe container-lab +druid ports container-lab +druid run container-lab verify +druid run container-lab report +druid run container-lab stop ``` diff --git a/examples/minecraft/scroll.yaml b/examples/minecraft/scroll.yaml index 9a2662c9..960c6139 100644 --- a/examples/minecraft/scroll.yaml +++ b/examples/minecraft/scroll.yaml @@ -32,18 +32,18 @@ commands: run: restart procedures: - id: coldstart - image: druid-coldstarter:local + image: highcard/druid:stable expectedPorts: - name: minecraft keepAliveTraffic: 10kb/5m mounts: - path: /runtime + sub_path: . 
+ env: + DRUID_ROOT: "/runtime" + DRUID_COLDSTARTER_STATUS_FILE: ".coldstarter-finished.json" command: - druid-coldstarter - - --runtime-config - - /runtime/.druid/runtime.json - - --status-file - - .coldstarter-finished.json - id: start image: eclipse-temurin:21-jre diff --git a/go.mod b/go.mod index d00b2237..c423b0d1 100644 --- a/go.mod +++ b/go.mod @@ -71,11 +71,13 @@ require ( github.com/google/go-cmp v0.7.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/spdystream v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/morikuni/aec v1.1.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/ncruces/go-strftime v1.0.0 // indirect github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect @@ -123,7 +125,6 @@ require ( github.com/docker/docker v28.3.3+incompatible github.com/docker/go-connections v0.5.0 github.com/getkin/kin-openapi v0.133.0 - github.com/go-co-op/gocron v1.37.0 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 github.com/oapi-codegen/runtime v1.1.2 diff --git a/go.sum b/go.sum index 27ac2154..b7567ca1 100644 --- a/go.sum +++ b/go.sum @@ -53,8 +53,6 @@ github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vt github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/getkin/kin-openapi v0.133.0 h1:pJdmNohVIJ97r4AUFtEXRXwESr8b0bD721u/Tz6k8PQ= github.com/getkin/kin-openapi v0.133.0/go.mod h1:boAciF6cXk5FhPqe/NQeBTeenbjqU4LhWBf09ILVvWE= -github.com/go-co-op/gocron v1.37.0 
h1:ZYDJGtQ4OMhTLKOKMIch+/CY70Brbb1dGdooLEhh7b0= -github.com/go-co-op/gocron v1.37.0/go.mod h1:3L/n6BkO7ABj+TrfSVXLRzsP26zmikL4ISkLQ0O8iNY= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -130,6 +128,8 @@ github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6T github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= @@ -147,6 +147,8 @@ github.com/morikuni/aec v1.1.0 h1:vBBl0pUnvi/Je71dsRrhMBtreIqNMYErSAbEeb8jrXQ= github.com/morikuni/aec v1.1.0/go.mod h1:xDRgiq/iw5l+zkao76YTKzKttOp2cwPEne25HDkJnBw= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= github.com/ncruces/go-strftime v1.0.0/go.mod 
h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/oapi-codegen/runtime v1.1.2 h1:P2+CubHq8fO4Q6fV1tqDBZHCwpVpvPg7oKiYzQgXIyI= diff --git a/internal/api/dev.go b/internal/api/dev.go new file mode 100644 index 00000000..6ebc777b --- /dev/null +++ b/internal/api/dev.go @@ -0,0 +1,17 @@ +package api + +type DevWatchRequest struct { + WatchPaths []string `json:"watchPaths"` + HotReloadCommands []string `json:"hotReloadCommands,omitempty"` +} + +type DevWatchResponse struct { + Status string `json:"status"` + Enabled bool `json:"enabled"` + WatchedPaths []string `json:"watchedPaths,omitempty"` +} + +type DevWatchStatus struct { + Enabled bool `json:"enabled"` + WatchedPaths []string `json:"watchedPaths"` +} diff --git a/internal/api/generated.go b/internal/api/generated.go index 0d1c542a..8f0f7c77 100644 --- a/internal/api/generated.go +++ b/internal/api/generated.go @@ -44,18 +44,15 @@ type CreateScrollRequest struct { // Artifact OCI artifact reference or local scroll path Artifact string `json:"artifact"` - // DataRoot Optional daemon-local path or backend ref containing runtime data directory. If omitted, a materializing runtime backend may pull the artifact. - DataRoot *string `json:"data_root,omitempty"` - // Id Deprecated alias for name. Optional local runtime scroll id/name. Id *string `json:"id,omitempty"` // Name Optional local runtime scroll id/name. If omitted, the daemon derives it from scroll.yaml name. Name *string `json:"name,omitempty"` - // ScrollRoot Optional daemon-local path or backend ref containing scroll.yaml and scroll spec files. If omitted, a materializing runtime backend may pull the artifact. - ScrollRoot *string `json:"scroll_root,omitempty"` - Start *bool `json:"start,omitempty"` + // OwnerId Runtime owner id used for customer-facing route authorization. 
+ OwnerId *string `json:"owner_id,omitempty"` + RegistryCredentials *[]RegistryCredential `json:"registry_credentials,omitempty"` } // DeletedScroll defines model for DeletedScroll. @@ -66,12 +63,14 @@ type DeletedScroll struct { // EnsureScrollRequest defines model for EnsureScrollRequest. type EnsureScrollRequest struct { - Artifact string `json:"artifact"` - DataRoot *string `json:"data_root,omitempty"` - Id *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - ScrollRoot *string `json:"scroll_root,omitempty"` - Start *bool `json:"start,omitempty"` + Artifact string `json:"artifact"` + ArtifactDigest *string `json:"artifact_digest,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + + // OwnerId Runtime owner id used for customer-facing route authorization. + OwnerId *string `json:"owner_id,omitempty"` + RegistryCredentials *[]RegistryCredential `json:"registry_credentials,omitempty"` } // HealthResponse defines model for HealthResponse. @@ -86,10 +85,18 @@ type HealthResponse struct { StartDate *time.Time `json:"start_date"` } +// RegistryCredential defines model for RegistryCredential. +type RegistryCredential struct { + Host string `json:"host"` + Password string `json:"password"` + Username string `json:"username"` +} + // RuntimeArtifactOperationRequest defines model for RuntimeArtifactOperationRequest. type RuntimeArtifactOperationRequest struct { - Artifact string `json:"artifact"` - Restart *bool `json:"restart,omitempty"` + Artifact string `json:"artifact"` + RegistryCredentials *[]RegistryCredential `json:"registry_credentials,omitempty"` + Restart *bool `json:"restart,omitempty"` } // RuntimePortStatus defines model for RuntimePortStatus. 
@@ -141,13 +148,12 @@ type RuntimeScroll struct { Artifact string `json:"artifact"` Commands *map[string]interface{} `json:"commands,omitempty"` CreatedAt time.Time `json:"created_at"` - DataRoot string `json:"data_root"` Id string `json:"id"` LastError *string `json:"last_error,omitempty"` OwnerId *string `json:"owner_id,omitempty"` + Root string `json:"root"` Routing *[]RuntimeRouteAssignment `json:"routing,omitempty"` ScrollName string `json:"scroll_name"` - ScrollRoot string `json:"scroll_root"` Status RuntimeScrollStatus `json:"status"` UpdatedAt time.Time `json:"updated_at"` } @@ -2938,46 +2944,46 @@ func RegisterHandlersWithOptions(router fiber.Router, si ServerInterface, option // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+xa3XMbuQ3/VzhsH2XL6V374Ddf0rvmmptzrXTycJfxUFxIYsQlaZIrW/Xof+/wa7Uf", - "XH3ZvsaZvmRiEQCBHwASwPIRU1kqKUBYgy8fsaELKIn/75VSfH0jK8vE/AbuKjDW/ay0VKAtA09EjGFz", - "USZ2ZqH0//mzhhm+xH8ab8WPo+zxTSUsK8GJhquaH29G2K4V4EtMtCZrvNmMsIa7imko8OVvra0+17Ry", - "+gWoZ34ry5KIYmKJrcwvRHn1ioJZJgXh1w21ra4gJ0ADsTChWnI+bLC2bEaoXynAUM2U2wBf4l/fvkdp", - "FWmYgQZBAUmNuKSEI+MFI0XsAo8wPJBS8WBt4DHnha5YcT6fjy0Y6/+5dP/gWldjNRNzp2tBLLnVUub0", - "UMFiVBAopTgLu7ttnS5TQpcgCqcgolJYwgQTc6SDT5CTiwqmgVqp1+fo/QzJklkLxQgRVBILmhHO/tPk", - "SSJLskaq4hzZBdRInOe0Z0Vf7XegNFBioUCEM2LQTGokSAnnqLYomJL2jYCyYuzJWqB+kVOT29lR7oBs", - "5wYtMJyNAWBUgGYrMIhZNNOyjGzna1JydLhmges5fdrUg4gimWMUUDRjHMxLuddYoqMRM1Jx28m4qZQc", - "iOgneEqtXHa/Aw4WipCd/bQMEZXTxFaeYOuAIkjqK95RhzmSKCCn0d+FqfQxx8XuJB5IksEI3hdAf4hX", - "/gGE28UNGCWFgb75pSwy6fa20hqERQvPjQLGyNM2M0Uuc8GltJxrMKYv9jquIAWagrBkDv4Q4ZIULqCd", - "YsTRuvybSV0Siy/xjEvijtiSPLCyKvHlm4uLES6ZCH9d1CqIqpyCrpG8LYjN2PZpAaJ5OHhaH231jo7x", - "zKUWHmFRcU6mztyWMwZC0kOU80O8Uq+ir35Nlp4Wlxr6sTIj3DwxWKKS11LbSZ2XbbWmshLNsK/3GeGF", - "NPaWqazCfk1J3TSHCQvz4K8lgLribAUfNZnNGM3K4MTYW0ItWzG7viVeVNZlh2fksEZKSwpFpQf4tLSS", - "Sp53zsPtdG0DXLV+TNi/fb/VrbGTkZWm+W1sD40G3HHxqL0Sj1zulnnPRCHv8zodY10n+LwfmthGDzQA", - 
"HcUI2xpfI7QjYruVai9s4cGCFoTvis/jDnOn+O3w6q4AUdWUM7ojHSqdY9zstp+J+Uei55CxflBNt2AU", - "oUdnxz7jT80dA9wXtsOtQS4ku6gY0CtGYVjDRDBk4WFxe5t+78VwS4HOdjvCeKh22nkX0NBVmaO7Keq7", - "qeKok/Skgsgf3KB1cGtvWd4L0LcDvDpE9rO3rnU19pRqLVWuwtUhvyVA8QjrSrgC35enUin/WwBgVFe3", - "nzPwVqo40iW5krgOl7YRTe+1za+NaQVFS51c1IZw/SDnO3v5huOGMrf2SWcLn6m00syuJ86/sQQBokFf", - "VXax/evHBNXPnz56jJsV38+fPiIrlyBC98cKEJbZNVJarlgB2qPhxLvL0Ivb4rywVnnNHH/asy1+spDa", - "nrnqpUB3Feh12kxq9AmmE0mXYF3bJ4Cm4pY5Rk+M0x0TttjuTBT7J6zxxqHAxEy6jX3v6M+BTdfId7pi", - "BXr74T3ipBJ0Aca3lCURZA4mdZ2gz3y3WPeaRCnOaCi6R4izJfwu5sS11qBXoM3IDxymxIAZeYH3ME1r", - "5797dZn1zUCtAB5htxrUujh/c37hs1yBIIrhS/yd/2mEXXPsHTomio1Xb8ah23C/xFusbeFPYFPN3upL", - "sBceyun3RSAMbY/3l6+WfffjN/vLxUVCMlYKDQjGX4zbKk3Z9h02nebKu6qtc6Dwsf3Xi+/+wI0n4cpB", - "lSArwkIH4/OpKkui1xHOLo6WzI07RqIn3Inh8MafHWtyU4gc0/BTG/4PzNhJpHki+Mcc+fHy7A8pe9jc", - "tKZHpoOLU78zYGpCE1ea2LgKwGSAaI4rcTinwdgfZLF+tkDITUQ37UvB3f6bnh/ePJsKHfj3wY3SLdlG", - "PRjSwX037P2QHIOf+PgCKuuR5kTohTySGzod5JGL/5lHAmpdjwRDuqNWeGDGhqtFxpEnX4cZijnaXY+s", - "2IRz3pVEfXeFkWLtLkU0KcGCdls8hjs0DuvjFerLnzbQowZo3drp8ws6oT0O3e+EVBZuRvj7i+/7t1+H", - "XEiLZr5TbnstbHtUHo3yx/hPYF8n8keG/1MRd/foE48tlwdjV5dVavjs+sGvv7BLnv883Df4/NrOxgAz", - "cqwxIdun4gPQqpFg0WsneTx17uPH+L/NsPdvKhF0jh9RXyICRlkhtN7wlaT3v0PX2k3KJ6b5TSW6d+EW", - "mZOcL2ZsPlhE16fv20D3FZ7B3Wa954hros2204wG9w9PlSM7FVMjOZiDUA2UXyGuRwzx+pgnw9AS1lCg", - "qZ9yxKllH/qA3pmhUkGB6BaUE8Dncn4A8B8c1SsrKFpzrgzmzqZT8OYBi4R1/DNBPoy0ktoeAPW1DDX5", - "V4f1MV1941vk0Z09ckCl4cazV3kt6SdlTB0phzhzS/vKsqf36it3UyTrPJ65zFEdipMAv6uggv1Y/8uT", - "fYMwe8MGIU7hfdeg2sJ8F0HZfzxpMFbumsLcBIL/tzIv3ecGnA/uZZLjTsqtxoe6vNf9a9U4F4u0r8f1", - "uae2X5u7h/qels+vQRtmbHxqJfVZeLQLBQqvEpCufdMPgvD9el8IjK1/hnDApdZ6tvDqS5X2I4xDqpXA", - "gBJemXrRkinfPvDUHYYTfFS/G8sn6cQtf6Njv0l46Lc7PzzRs8zzjJVqF9BSfbM4+ycP+3CWqltQ30u9", - "5JIUBt0vGAekNPiXM2LuP0LvcUN81pNAHPo6fnX9HscXVniMHVJRaO+zflAqfEAvQVj/0SPOfBD4u9NR", - "1j6q50yP/f4QGauBlM4U4t+BW81gRfiW23d/fd5Y1sWibKvMljEUZn3O7NMDv7t/aGC2Eu5hajxlRorr", - 
"vxAT4SkKkyK8U09NTxTgz5w+b/hKjegC6NJkGeN35j7rLxW37CzGRQqTnPUpEvoi3oWnApzNgK4pz7PH", - "8Olz/+gC8J5Yukg+K2AFXCofCfEldMLPkWVkXAkhbUBt5sQRSsE0rCf1usGbz5v/BgAA//94laGN6jMA", - "AA==", + "H4sIAAAAAAAC/+xa3XMbuQ3/VzhsH9eW00v74Def07vmmpu4djp5uMt4KBKSGHFJmuTaVj363zv82NV+", + "cPUVp40z95KJRQAEfgBIAMsnTFWplQTpLD5/wpYuoCThvxdai9W1qhyX82u4q8A6/7M2SoNxHAIRsZbP", + "ZVmzcwdl+M+fDczwOf7TZCN+kmRPrivpeAleNFw0/HhdYLfSgM8xMYas8HpdYAN3FTfA8Plvna0+NbRq", + "+hloYL5UZUkku3HEVfZXooN6jHHHlSTiqqW2MxXkBBggDm6oUUKMG2wcnxEaVhhYarj2G+Bz/P7yLapX", + "kYEZGJAUkDJIKEoEskEw0sQtcIHhkZRaRGsjjz1lpuLsdD6fOLAu/HPu/8GNrtYZLudeV86GCrwBbYAS", + "BwwRwYlFM2WQJCWcovc6opBUMdEDtUqcTQJZR63PampzO3vKjPF7bYDezpAquXPACuQWgBiBUknEwPB7", + "sIg7NDOqTGynK1IKtL9m6kGCuc0hkyIOBQrEGaossAAPraxTJZiTGaFczpHxQYlI5RbK8P8Qz3+a28vA", + "nFtnVrfUAAPpOBEHJEBivmx4dwd/HXa5yH8DAhywGLnDkI2IDEywIVH80gZaFiUNLe6pwz1JEpDT6O/S", + "VuaQVBpoVy/eMj5P3CNJMBqhfwRI7Y5/ABFucQ1WK2lh6IlSsUxOX1bGgHRoEbhRdDcKtO10VMuc/dqo", + "uQFrh2Kv0grSYChIR+YQkBaKMI+wVyzg6pN8pkxJHD7HM6GIPwlL8sjLqsTnr87OClxyGf86a1SQVTkF", + "kwLcuFtGXMa2jwuQ7RMo0IbAb3b0jCc+KnCBZSUEmXpzO3fHSHYEiHJ+yPh14IuFGol2Tax9UCYf85UF", + "MxL3PeWC/BZDS3BW5ZgZFym83tfOOS6rx7KCwYxUwuHzGREWiufLEr9l8G1LnalSAog8LIUSDlfKuJvm", + "4OxaPlWVZLl9igD6LddZTMKaVh0VuXQwj1G8BNAXgt/DB0NmM06zMgSx7pZQx++5W92SICobyPsfluMa", + "aaMosMqM8BnlFFUi7//H2+nKRbga/bh0f3u90a21k1WVoflt3ACNFtxp8aC9ah613C7zgUumHvI6HWJd", + "L/jqXGywTR5oAVqkCNsY3yC0JWL7ZfYgbOHR+aNAbIvPw+5Zr/jt+Oq2ANHVVHC6JR0qI/Jn3Db7uZx/", + "IGYOGetH1fQLVhN6cHbsMv7Y3LEggDplxvuaXEj2UbFg7jmFcQ1rgjEL94vb2/r3QQx3FOhttyWMx4rb", + "rdcNjS2hPbgVpKEVZAedpCPVaDiawZjouK016fC4VGrkHo1B/ewtd4Fj67UlPDZdg/SF1281VrjAppLS", + "k3kqpXX4LVpeNJ3FpwxylWYHop1rR5pISLh1bWk07zi3s3cu+mLYvVPzrQOFlhfGMrABuLdFyDhaGe5W", + "N95ZqZQAYsBcVG6x+eunGpdfPn4IgLbr2V8+fkBOLUHGBpqHgsitkDbqnjMwAQ0v3l9qQdwG1IVzOmjm", + "+es9u+JvFsq4E1+FMHRXgVnVmymDPsL0RtElOESVlEDr0p17xkCM67sibrHZmWj+T/C9jE8fOVN+Y6qk", + "i/m87hv5xlScoct3b5EglaQLsIhIhkoiyRwsCpxcgjmZEroEVk8fiNaC09hSFEjwJfwu56QE5M8eMLZA", + 
"jDgyJRZsEQQ+wLReO/09qMtdaHUaBXCB/WpU6+z01elZyGUNkmiOz/EP4SdfWrtFcOiEaD65fzWJvZT/", + "Jd1GXQt/Bld3JJ2uCwfhsfJ+yyJhbOqCv0KVG3q7sNlfzs5qJNON34Jg8tn6repR366To9c6Bld1dY4U", + "Ibb/evbD/3Djm3h1oEqSe8JjfxbyqSpLYlYJzj6Ojsxt6IOiJwoc8cafPGvtphg5tuWnLvzvuHU3ieYL", + "wT/k/E6X4HAWMMDmujOAsz1cvPq9GV0bmrTSxsbf5DYDRHtmiuOhDNb9qNjq2QIhN5Zdd28Af4uvB354", + "9Wwq9ODfBTeqr8Qu6tGQHu7bYR+G5ATCaC0UQlmPtEdvX8kjueneXh45+795JKLW90g0pD+thkduXbxa", + "VBpri1WcENmD3fXE2Tqe877+Gborzm4bd2liSAkOjN/iKd6h6YtBukJDrdMFumiB1i+UPn1FJ3Tnzrud", + "UNeA6wK/Pns9PoVN5FI5NAsdb9drcduD8qjIH+M/g3uZyB8Y/l+KuL9Hv/DY8nkw8XVZpcfPrh/D+ld2", + "yfOfh7tmpN/a2RhhRp41JWT3VHwEWrUSLHntKI/XHfjkKf1vPe7960pGndOX3K8RAUVWCG02fCHp/e/Y", + "tfaT8gvT/LqS/btwg8xRzpczPh8topvT9zLSfYNncL9ZHzjiihi76TSTwcPDU+fIjsXUKgF2L1Qj5TeI", + "6wHDuCHmtWFoCStgaBqmHGn6OIQ+ondiqdLAEN2AcgT4Qs33AP6dp3phBUVnzpXB3Nt0DN4iYlFjnf6s", + "IR9HWivj9oD6SsWa/JvD+pCuvvVN8eDOHnmg6uHGs1d5HelHZUwTKfs4c0P7wrJn8PQsd1PU1gU8c5mj", + "exRHAX5XQQW7sf5XIPsOYQ6GjUJch/ddi2oD810CZffxZMA6tW0Kcx0J/mhlvnafG3Heu5epHXdUbrW+", + "uuW9Hp7MprlYon05rs+99/3W3D3W93R8fgXGcuvSQzJlTuLLYWAovi5ApvHNMAjid+hdITBx4TnBHpda", + "5/nBiy9Vuo8p9qlWIgOq8crUi45MRRwygGS1bxqGI3zUvPfKJ+mNX/5Ox3438Rnj9vwIRM8yz7NO6W1A", + "K/3d4hzeN+zCWel+Qf2gzFIowix6WHABSBsIL2DkPHyE3uGG9DynBnHs6/jF1VucXkrhCfZIJaGDz/pR", + "qfgBvQTpwkePNPNBEO5OT9n4qJkzPQ37Q2SdAVJ6Uzy3AWc43BOx4Q7d35A3lXWpKNsos2GMhdmQM/v0", + "IOweHhrYjYQHmNpAmZHi+y/EZXx3wpUMApqmJwkIZ86QN36lRnQBdGmzjOk785D110o4fpLiog6TnPV1", + "JAxFvIlPBQSfAV1RkWdP4TPk/skH4ANxdFH7jME9CKVDJKR33jV+niwj40JK5SJqMy+OUAq2ZT1p1i1e", + "f1r/NwAA//9iQ40TbzQAAA==", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/internal/callbackapi/generated.go b/internal/callbackapi/generated.go new file mode 100644 index 00000000..58c688ff --- /dev/null +++ b/internal/callbackapi/generated.go @@ -0,0 +1,430 @@ +// Package callbackapi provides primitives to interact with the openapi HTTP API. 
+// +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.5.1 DO NOT EDIT. +package callbackapi + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path" + "strings" + + "github.com/getkin/kin-openapi/openapi3" + "github.com/gofiber/fiber/v2" + "github.com/oapi-codegen/runtime" +) + +// WorkerResult defines model for WorkerResult. +type WorkerResult struct { + ArtifactDigest *string `json:"artifact_digest,omitempty"` + Error *string `json:"error,omitempty"` + ScrollYaml *string `json:"scroll_yaml,omitempty"` + Token string `json:"token"` +} + +// Runtime defines model for Runtime. +type Runtime = string + +// CompleteWorkerJSONRequestBody defines body for CompleteWorker for application/json ContentType. +type CompleteWorkerJSONRequestBody = WorkerResult + +// RequestEditorFn is the function signature for the RequestEditor callback function +type RequestEditorFn func(ctx context.Context, req *http.Request) error + +// Doer performs HTTP requests. +// +// The standard http.Client implements this interface. +type HttpRequestDoer interface { + Do(req *http.Request) (*http.Response, error) +} + +// Client which conforms to the OpenAPI3 specification for this service. +type Client struct { + // The endpoint of the server conforming to this interface, with scheme, + // https://api.deepmap.com for example. This can contain a path relative + // to the server, such as https://api.deepmap.com/dev-test, and all the + // paths in the swagger spec will be appended to the server. + Server string + + // Doer for performing requests, typically a *http.Client with any + // customized settings, such as certificate chains. + Client HttpRequestDoer + + // A list of callbacks for modifying requests which are generated before sending over + // the network. 
+ RequestEditors []RequestEditorFn +} + +// ClientOption allows setting custom parameters during construction +type ClientOption func(*Client) error + +// Creates a new Client, with reasonable defaults +func NewClient(server string, opts ...ClientOption) (*Client, error) { + // create a client with sane default values + client := Client{ + Server: server, + } + // mutate client and add all optional params + for _, o := range opts { + if err := o(&client); err != nil { + return nil, err + } + } + // ensure the server URL always has a trailing slash + if !strings.HasSuffix(client.Server, "/") { + client.Server += "/" + } + // create httpClient, if not already present + if client.Client == nil { + client.Client = &http.Client{} + } + return &client, nil +} + +// WithHTTPClient allows overriding the default Doer, which is +// automatically created using http.Client. This is useful for tests. +func WithHTTPClient(doer HttpRequestDoer) ClientOption { + return func(c *Client) error { + c.Client = doer + return nil + } +} + +// WithRequestEditorFn allows setting up a callback function, which will be +// called right before sending the request. This can be used to mutate the request. +func WithRequestEditorFn(fn RequestEditorFn) ClientOption { + return func(c *Client) error { + c.RequestEditors = append(c.RequestEditors, fn) + return nil + } +} + +// The interface specification for the client above. 
+type ClientInterface interface { + // CompleteWorkerWithBody request with any body + CompleteWorkerWithBody(ctx context.Context, runtimeId Runtime, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + + CompleteWorker(ctx context.Context, runtimeId Runtime, body CompleteWorkerJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) +} + +func (c *Client) CompleteWorkerWithBody(ctx context.Context, runtimeId Runtime, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewCompleteWorkerRequestWithBody(c.Server, runtimeId, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) CompleteWorker(ctx context.Context, runtimeId Runtime, body CompleteWorkerJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewCompleteWorkerRequest(c.Server, runtimeId, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +// NewCompleteWorkerRequest calls the generic CompleteWorker builder with application/json body +func NewCompleteWorkerRequest(server string, runtimeId Runtime, body CompleteWorkerJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewCompleteWorkerRequestWithBody(server, runtimeId, "application/json", bodyReader) +} + +// NewCompleteWorkerRequestWithBody generates requests for CompleteWorker with any type of body +func NewCompleteWorkerRequestWithBody(server string, runtimeId Runtime, contentType string, body io.Reader) (*http.Request, error) { + var err error + + var pathParam0 string + + 
pathParam0, err = runtime.StyleParamWithLocation("simple", false, "runtime_id", runtime.ParamLocationPath, runtimeId) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/internal/v1/workers/%s/complete", pathParam0) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + +func (c *Client) applyEditors(ctx context.Context, req *http.Request, additionalEditors []RequestEditorFn) error { + for _, r := range c.RequestEditors { + if err := r(ctx, req); err != nil { + return err + } + } + for _, r := range additionalEditors { + if err := r(ctx, req); err != nil { + return err + } + } + return nil +} + +// ClientWithResponses builds on ClientInterface to offer response payloads +type ClientWithResponses struct { + ClientInterface +} + +// NewClientWithResponses creates a new ClientWithResponses, which wraps +// Client with return type handling +func NewClientWithResponses(server string, opts ...ClientOption) (*ClientWithResponses, error) { + client, err := NewClient(server, opts...) + if err != nil { + return nil, err + } + return &ClientWithResponses{client}, nil +} + +// WithBaseURL overrides the baseURL. +func WithBaseURL(baseURL string) ClientOption { + return func(c *Client) error { + newBaseURL, err := url.Parse(baseURL) + if err != nil { + return err + } + c.Server = newBaseURL.String() + return nil + } +} + +// ClientWithResponsesInterface is the interface specification for the client with responses above. 
+type ClientWithResponsesInterface interface { + // CompleteWorkerWithBodyWithResponse request with any body + CompleteWorkerWithBodyWithResponse(ctx context.Context, runtimeId Runtime, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*CompleteWorkerResponse, error) + + CompleteWorkerWithResponse(ctx context.Context, runtimeId Runtime, body CompleteWorkerJSONRequestBody, reqEditors ...RequestEditorFn) (*CompleteWorkerResponse, error) +} + +type CompleteWorkerResponse struct { + Body []byte + HTTPResponse *http.Response +} + +// Status returns HTTPResponse.Status +func (r CompleteWorkerResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r CompleteWorkerResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +// CompleteWorkerWithBodyWithResponse request with arbitrary body returning *CompleteWorkerResponse +func (c *ClientWithResponses) CompleteWorkerWithBodyWithResponse(ctx context.Context, runtimeId Runtime, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*CompleteWorkerResponse, error) { + rsp, err := c.CompleteWorkerWithBody(ctx, runtimeId, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseCompleteWorkerResponse(rsp) +} + +func (c *ClientWithResponses) CompleteWorkerWithResponse(ctx context.Context, runtimeId Runtime, body CompleteWorkerJSONRequestBody, reqEditors ...RequestEditorFn) (*CompleteWorkerResponse, error) { + rsp, err := c.CompleteWorker(ctx, runtimeId, body, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseCompleteWorkerResponse(rsp) +} + +// ParseCompleteWorkerResponse parses an HTTP response from a CompleteWorkerWithResponse call +func ParseCompleteWorkerResponse(rsp *http.Response) (*CompleteWorkerResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &CompleteWorkerResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + return response, nil +} + +// ServerInterface represents all server handlers. +type ServerInterface interface { + // Complete a pending worker action + // (POST /internal/v1/workers/{runtime_id}/complete) + CompleteWorker(c *fiber.Ctx, runtimeId Runtime) error +} + +// ServerInterfaceWrapper converts contexts to parameters. +type ServerInterfaceWrapper struct { + Handler ServerInterface +} + +type MiddlewareFunc fiber.Handler + +// CompleteWorker operation middleware +func (siw *ServerInterfaceWrapper) CompleteWorker(c *fiber.Ctx) error { + + var err error + + // ------------- Path parameter "runtime_id" ------------- + var runtimeId Runtime + + err = runtime.BindStyledParameterWithOptions("simple", "runtime_id", c.Params("runtime_id"), &runtimeId, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter runtime_id: %w", err).Error()) + } + + return siw.Handler.CompleteWorker(c, runtimeId) +} + +// FiberServerOptions provides options for the Fiber server. +type FiberServerOptions struct { + BaseURL string + Middlewares []MiddlewareFunc +} + +// RegisterHandlers creates http.Handler with routing matching OpenAPI spec. 
+func RegisterHandlers(router fiber.Router, si ServerInterface) { + RegisterHandlersWithOptions(router, si, FiberServerOptions{}) +} + +// RegisterHandlersWithOptions creates http.Handler with additional options +func RegisterHandlersWithOptions(router fiber.Router, si ServerInterface, options FiberServerOptions) { + wrapper := ServerInterfaceWrapper{ + Handler: si, + } + + for _, m := range options.Middlewares { + router.Use(fiber.Handler(m)) + } + + router.Post(options.BaseURL+"/internal/v1/workers/:runtime_id/complete", wrapper.CompleteWorker) + +} + +// Base64 encoded, gzipped, json marshaled Swagger object +var swaggerSpec = []string{ + + "H4sIAAAAAAAC/4RTsW4bMQz9FYHteIicNtNtbbpkK7J0CAyDlmhHsU5SKZ4Dw7h/LySdawc9oJtNPr53", + "5Hs6g4lDioGCZOjPkJBxICGu/57HIG6g8tMF6CGhvEIHAUsNuHU3zkIHTL9Hx2ShFx6pg2xeacAyKadU", + "0FnYhT1M03RpVolfkQ/Ez5RHL/UDOCZicVS7yOJ2aGRj3Z6yLNB1QMyRFzvZcPR+c8LBL/YlHigsfeLt", + "Oi8zbN1dYHH7RkbaJi7sYmGwlA27JC6WOz0FIQ7olUHvt2gO6tvPJzVmsmp7Uu915awwWGXpqDLxkTjf", + "QQfixBeJHzw6q/7SPN7QQAcF3YRWd/d3q7JKTBQwOejhay111at6Q+1mGn2817O2Pl/Nm3SJgCepPqfY", + "zlxcwLLOk4UeHmdEc6uyX3PycobPTDvo4ZO+pklfIfqSo2ndLktZvkd7KjomBqFQJTEl70wV1W85Vmeu", + "MVqSmHOkP4Ro+uhfiWMt5BRDbrH6snr417RGoriyKDSGkpAtx31YrZY8PqJ3dnZzHmvo+/+ia6RUZDWG", + "Q4jvQaGpwPo4xmFAPt1cXaFKFKwL+8v8DO9AcF8cgFaHdXteLVDVmZE99KDr6Wfw+fKA56FpPf0JAAD/", + "/0i8QQ4HBAAA", +} + +// GetSwagger returns the content of the embedded swagger specification file +// or error if failed to decode +func decodeSpec() ([]byte, error) { + zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, "")) + if err != nil { + return nil, fmt.Errorf("error base64 decoding spec: %w", err) + } + zr, err := gzip.NewReader(bytes.NewReader(zipped)) + if err != nil { + return nil, fmt.Errorf("error decompressing spec: %w", err) + } + var buf bytes.Buffer + _, err = buf.ReadFrom(zr) + if err != nil { + return nil, fmt.Errorf("error decompressing spec: %w", err) + } + + return buf.Bytes(), nil +} + +var rawSpec = 
decodeSpecCached() + +// a naive cached of a decoded swagger spec +func decodeSpecCached() func() ([]byte, error) { + data, err := decodeSpec() + return func() ([]byte, error) { + return data, err + } +} + +// Constructs a synthetic filesystem for resolving external references when loading openapi specifications. +func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) { + res := make(map[string]func() ([]byte, error)) + if len(pathToFile) > 0 { + res[pathToFile] = rawSpec + } + + return res +} + +// GetSwagger returns the Swagger specification corresponding to the generated code +// in this file. The external references of Swagger specification are resolved. +// The logic of resolving external references is tightly connected to "import-mapping" feature. +// Externally referenced files must be embedded in the corresponding golang packages. +// Urls can be supported but this task was out of the scope. +func GetSwagger() (swagger *openapi3.T, err error) { + resolvePath := PathToRawSpec("") + + loader := openapi3.NewLoader() + loader.IsExternalRefsAllowed = true + loader.ReadFromURIFunc = func(loader *openapi3.Loader, url *url.URL) ([]byte, error) { + pathToFile := url.String() + pathToFile = path.Clean(pathToFile) + getSpec, ok := resolvePath[pathToFile] + if !ok { + err1 := fmt.Errorf("path not found: %s", pathToFile) + return nil, err1 + } + return getSpec() + } + var specData []byte + specData, err = rawSpec() + if err != nil { + return + } + swagger, err = loader.LoadFromData(specData) + if err != nil { + return + } + return +} diff --git a/internal/core/domain/broadcast_channel.go b/internal/core/domain/broadcast_channel.go index 07fea1c1..7d079643 100644 --- a/internal/core/domain/broadcast_channel.go +++ b/internal/core/domain/broadcast_channel.go @@ -48,6 +48,9 @@ func (h *BroadcastChannel) Unsubscribe(client chan *[]byte) { // Broadcast sends data to all clients, returns false if dropped func (h *BroadcastChannel) Broadcast(data []byte) bool 
{ + h.mu.RLock() + defer h.mu.RUnlock() + if h.closed { return false } diff --git a/internal/core/domain/oci.go b/internal/core/domain/oci.go index c0aaa86f..dfa400d2 100644 --- a/internal/core/domain/oci.go +++ b/internal/core/domain/oci.go @@ -5,14 +5,19 @@ import "sync/atomic" type ArtifactType string const ( - ArtifactTypeScrollRoot ArtifactType = "application/vnd.highcard.druid.scroll.config.v1+json" - ArtifactTypeScrollFs ArtifactType = "application/vnd.highcard.druid.scroll-fs.config.v1+json" - ArtifactTypeScrollData ArtifactType = "application/vnd.highcard.druid.scroll-data.config.v1+json" - ArtifactTypeScrollMeta ArtifactType = "application/vnd.highcard.druid.scroll-meta.config.v1+json" + ArtifactTypeRuntimeRoot ArtifactType = "application/vnd.highcard.druid.scroll.config.v1+json" + ArtifactTypeScrollFs ArtifactType = "application/vnd.highcard.druid.scroll-fs.config.v1+json" + ArtifactTypeScrollData ArtifactType = "application/vnd.highcard.druid.scroll-data.config.v1+json" + ArtifactTypeScrollMeta ArtifactType = "application/vnd.highcard.druid.scroll-meta.config.v1+json" +) + +const ( + SnapshotProgressModeIdle = "idle" + SnapshotProgressModeBackup = "backup" + SnapshotProgressModeRestore = "restore" ) // SnapshotProgress tracks the state of a data pull/push operation. -// Mode values: "noop" (idle), "backup" (pushing data), "restore" (pulling data chunks). 
type SnapshotProgress struct { Percentage atomic.Int64 Mode atomic.Value // stores string @@ -20,7 +25,7 @@ type SnapshotProgress struct { func NewSnapshotProgress() *SnapshotProgress { sp := &SnapshotProgress{} - sp.Mode.Store("noop") + sp.Mode.Store(SnapshotProgressModeIdle) return sp } diff --git a/internal/core/domain/queue_item.go b/internal/core/domain/queue_item.go index 8fcdea92..4966ac88 100644 --- a/internal/core/domain/queue_item.go +++ b/internal/core/domain/queue_item.go @@ -1,10 +1,9 @@ package domain type QueueItem struct { - Name string - Status ScrollLockStatus - Error error - RunAfterExecution func() - DoneChan chan struct{} - RestartCount uint + Name string + Status ScrollLockStatus + Error error + DoneChan chan struct{} + RestartCount uint } diff --git a/internal/core/domain/registry.go b/internal/core/domain/registry.go index 8539539a..edb72aaa 100644 --- a/internal/core/domain/registry.go +++ b/internal/core/domain/registry.go @@ -1,7 +1,7 @@ package domain type RegistryCredential struct { - Host string `mapstructure:"host"` - Username string `mapstructure:"username"` - Password string `mapstructure:"password"` + Host string `json:"host" mapstructure:"host" yaml:"host"` + Username string `json:"username" mapstructure:"username" yaml:"username"` + Password string `json:"password" mapstructure:"password" yaml:"password"` } diff --git a/internal/core/domain/runtime.go b/internal/core/domain/runtime.go index fc886188..c797dbf5 100644 --- a/internal/core/domain/runtime.go +++ b/internal/core/domain/runtime.go @@ -3,38 +3,3 @@ package domain const DefaultExecImage = "bash:latest" const RuntimeDataDir = "data" -const RuntimeConfigDir = ".druid" -const RuntimeConfigFile = "runtime.json" - -type RuntimeConfig struct { - SchemaVersion string `json:"schemaVersion"` - Scroll RuntimeConfigScroll `json:"scroll"` - Paths RuntimeConfigPaths `json:"paths"` - Ports []Port `json:"ports"` - ExpectedPorts []RuntimeExpectedPort `json:"expectedPorts,omitempty"` - 
Runtime RuntimeConfigRuntime `json:"runtime"` -} - -type RuntimeConfigScroll struct { - ID string `json:"id"` - Name string `json:"name"` - Artifact string `json:"artifact"` -} - -type RuntimeConfigPaths struct { - Data string `json:"data"` - RuntimeConfig string `json:"runtimeConfig"` -} - -type RuntimeExpectedPort struct { - Name string `json:"name"` - Procedure string `json:"procedure"` - Port int `json:"port"` - Protocol string `json:"protocol"` - KeepAliveTraffic string `json:"keepAliveTraffic,omitempty"` -} - -type RuntimeConfigRuntime struct { - Backend string `json:"backend"` - GeneratedAt string `json:"generatedAt"` -} diff --git a/internal/core/domain/runtime_scroll.go b/internal/core/domain/runtime_scroll.go index 377cf656..078febc8 100644 --- a/internal/core/domain/runtime_scroll.go +++ b/internal/core/domain/runtime_scroll.go @@ -13,19 +13,19 @@ const ( ) type RuntimeScroll struct { - ID string `json:"id"` - OwnerID string `json:"owner_id,omitempty"` - Artifact string `json:"artifact"` - ScrollRoot string `json:"scroll_root"` - DataRoot string `json:"data_root"` - ScrollName string `json:"scroll_name"` - ScrollYAML string `json:"-"` - Status RuntimeScrollStatus `json:"status"` - LastError string `json:"last_error,omitempty"` - Routing []RuntimeRouteAssignment `json:"routing,omitempty"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - Commands map[string]LockStatus `json:"commands,omitempty"` + ID string `json:"id"` + OwnerID string `json:"owner_id,omitempty"` + Artifact string `json:"artifact"` + ArtifactDigest string `json:"artifact_digest,omitempty"` + Root string `json:"root"` + ScrollName string `json:"scroll_name"` + ScrollYAML string `json:"-"` + Status RuntimeScrollStatus `json:"status"` + LastError string `json:"last_error,omitempty"` + Routing []RuntimeRouteAssignment `json:"routing,omitempty"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Commands 
map[string]LockStatus `json:"commands,omitempty"` } type RuntimeState struct { diff --git a/internal/core/domain/scroll.go b/internal/core/domain/scroll.go index c76e8ff3..7c08c7af 100644 --- a/internal/core/domain/scroll.go +++ b/internal/core/domain/scroll.go @@ -25,15 +25,11 @@ const ( RunModePersistent RunMode = "persistent" //restarts on failure and on program restart ) -type Cronjob struct { - Name string `yaml:"name"` - Schedule string `yaml:"schedule"` - Command string `yaml:"command"` -} type Chunks struct { - Name string `yaml:"name"` - Path string `yaml:"path"` - Chunks []*Chunks `yaml:"chunks,omitempty" json:"chunks,omitempty"` + Name string `yaml:"name"` + Path string `yaml:"path"` + SkipUpdate bool `yaml:"skip_update,omitempty" json:"skip_update,omitempty"` + Chunks []*Chunks `yaml:"chunks,omitempty" json:"chunks,omitempty"` } type ColdStarterVars struct { @@ -69,7 +65,6 @@ type File struct { Serve string `yaml:"serve" json:"serve"` Ports []Port `yaml:"ports" json:"ports"` Commands map[string]*CommandInstructionSet `yaml:"commands" json:"commands"` - Cronjobs []*Cronjob `yaml:"cronjobs" json:"cronjobs"` Chunks []*Chunks `yaml:"chunks" json:"chunks"` } @@ -303,13 +298,13 @@ func (sc *Scroll) Validate(strict bool) error { return fmt.Errorf("procedure is required") } if p.Mode != "" { - return fmt.Errorf("procedure uses legacy mode %q; use type: container or type: signal", p.Mode) + return fmt.Errorf("procedure field mode is unsupported; use type: container or type: signal") } if p.Wait != nil { - return fmt.Errorf("procedure uses legacy wait; waits are no longer supported") + return fmt.Errorf("procedure field wait is unsupported") } if p.Data != nil { - return fmt.Errorf("procedure uses legacy data; use container command fields or type: signal") + return fmt.Errorf("procedure field data is unsupported; use container command fields or type: signal") } switch p.Kind() { case ProcedureTypeContainer: @@ -339,7 +334,7 @@ func (sc *Scroll) Validate(strict 
bool) error { } clean := filepath.Clean(mount.SubPath) if clean == ".." || strings.HasPrefix(clean, "../") { - return fmt.Errorf("mount sub_path %s escapes data root", mount.SubPath) + return fmt.Errorf("mount sub_path %s escapes runtime root", mount.SubPath) } } for _, expectedPort := range p.ExpectedPorts { @@ -421,9 +416,12 @@ const ScrollDataDir = "data" const DataLoadedMarkerFile = ".data-loaded" var ScrollFiles = map[string]ArtifactType{ - "update": ArtifactTypeScrollFs, - "scroll.yaml": ArtifactTypeScrollFs, - "public": ArtifactTypeScrollFs, - "private": ArtifactTypeScrollFs, - "data": ArtifactTypeScrollData, + "update": ArtifactTypeScrollFs, + "scroll.yaml": ArtifactTypeScrollFs, + "public": ArtifactTypeScrollFs, + "private": ArtifactTypeScrollFs, + "packet_handler": ArtifactTypeScrollFs, + "scroll-config.yml.scroll_template": ArtifactTypeScrollFs, + "data": ArtifactTypeScrollData, + ".meta": ArtifactTypeScrollFs, } diff --git a/internal/core/domain/scroll_test.go b/internal/core/domain/scroll_test.go index 63b9fd30..a34b736e 100644 --- a/internal/core/domain/scroll_test.go +++ b/internal/core/domain/scroll_test.go @@ -35,7 +35,7 @@ func TestSignalProcedureValidation(t *testing.T) { } } -func TestLegacyProcedureFieldsRejected(t *testing.T) { +func TestUnsupportedProcedureFieldsRejected(t *testing.T) { tests := []struct { name string procedure *Procedure @@ -44,17 +44,17 @@ func TestLegacyProcedureFieldsRejected(t *testing.T) { { name: "mode", procedure: &Procedure{Mode: "scroll-switch"}, - want: "legacy mode", + want: "field mode is unsupported", }, { name: "wait", procedure: &Procedure{Image: "alpine:3.20", Wait: false}, - want: "legacy wait", + want: "field wait is unsupported", }, { name: "data", procedure: &Procedure{Image: "alpine:3.20", Data: "start"}, - want: "legacy data", + want: "field data is unsupported", }, } diff --git a/internal/core/ports/services_ports.go b/internal/core/ports/services_ports.go index f564db80..5effaa13 100644 --- 
a/internal/core/ports/services_ports.go +++ b/internal/core/ports/services_ports.go @@ -11,9 +11,15 @@ import ( ) type AuthorizerServiceInterface interface { - CheckHeader(r *fiber.Ctx) (*time.Time, error) - CheckQuery(token string) (*time.Time, error) - GenerateQueryToken() string + CheckHeader(r *fiber.Ctx) (*AuthContext, error) + CheckQuery(runtimeID string, token string) (*AuthContext, error) + GenerateQueryToken(runtimeID string, ownerID string) string +} + +type AuthContext struct { + Subject string + RuntimeID string + ExpiresAt *time.Time } type ScrollServiceInterface interface { @@ -36,50 +42,74 @@ type LogManagerInterface interface { type RuntimeBackendInterface interface { Name() string - ReadScrollFile(scrollRoot string) ([]byte, error) + ReadScrollFile(root string) ([]byte, error) + StartDev(ctx context.Context, action RuntimeDevAction) error + StopDev(ctx context.Context, root string) error RunCommand(command RuntimeCommand) (*int, error) - ExpectedPorts(dataRoot string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimePortStatus, error) + ExpectedPorts(root string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimePortStatus, error) + RoutingTargets(root string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimeRoutingTarget, error) + StopRuntime(root string) error + DeleteRuntime(root string, purgeData bool) error + BackupRuntime(ctx context.Context, root string, artifact string, registryCredentials []domain.RegistryCredential) error + RestoreRuntime(ctx context.Context, root string, artifact string, registryCredentials []domain.RegistryCredential) error + SpawnPullWorker(ctx context.Context, action RuntimeWorkerAction) error Attach(commandName string, data string) error - Signal(commandName string, target string, signal string, dataRoot string) error -} - -type RuntimeLifecycleBackendInterface interface { - 
StopRuntime(dataRoot string) error - DeleteRuntime(dataRoot string, purgeData bool) error -} - -type RuntimeRoutingBackendInterface interface { - RoutingTargets(dataRoot string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimeRoutingTarget, error) -} - -type RuntimeBackupBackendInterface interface { - BackupRuntime(ctx context.Context, dataRoot string, artifact string) error - RestoreRuntime(ctx context.Context, dataRoot string, artifact string) error -} - -type RuntimeFileBackendInterface interface { - ReadDataFile(ctx context.Context, dataRoot string, relativePath string) ([]byte, error) - WriteDataFile(ctx context.Context, dataRoot string, relativePath string, data []byte) error + Signal(commandName string, target string, signal string, root string) error } type RuntimeCommand struct { Name string ScrollID string Command *domain.CommandInstructionSet - DataRoot string + Root string GlobalPorts []domain.Port ProcedureEnv map[string]map[string]string } type RuntimeMaterialization struct { - Artifact string - ScrollRoot string - DataRoot string - ScrollYAML []byte + Artifact string + ArtifactDigest string + Root string + ScrollYAML []byte } -type RuntimeMaterializerInterface interface { - MaterializeScroll(ctx context.Context, artifact string, requestedName string) (*RuntimeMaterialization, error) +type RuntimeWorkerMode string + +const ( + RuntimeWorkerModeCreate RuntimeWorkerMode = "create" + RuntimeWorkerModeUpdate RuntimeWorkerMode = "update" +) + +type RuntimeWorkerAction struct { + Mode RuntimeWorkerMode + RuntimeID string + Artifact string + RootRef string + MountPath string + CallbackURL string + CallbackToken string + RegistryCredentials []domain.RegistryCredential +} + +type RuntimeWorkerResult struct { + ScrollYAML string `json:"scroll_yaml,omitempty"` + ArtifactDigest string `json:"artifact_digest,omitempty"` + Error string `json:"error,omitempty"` +} + +type RuntimeDevAction struct { + RuntimeID string + 
RootRef string + MountPath string + Listen string + WatchPaths []string + HotReloadCommands []string + Routing []domain.RuntimeRouteAssignment + DaemonURL string + DaemonToken string + OwnerID string + AuthJWKSURL string + RuntimeJWKSURL string } type BroadcastChannelInterface interface { @@ -95,20 +125,16 @@ type ConsoleManagerInterface interface { type OciRegistryInterface interface { GetRepo(repoUrl string) (*remote.Repository, error) + FetchFile(artifact string, filePath string) ([]byte, error) + ResolveDigest(artifact string) (string, error) Pull(dir string, artifact string) error PullSelective(dir string, artifact string, includeData bool, progress *domain.SnapshotProgress) error CanUpdateTag(descriptor v1.Descriptor, folder string, tag string) (bool, error) Push(folder string, repo string, tag string, overrides map[string]string, packMeta bool, scrollFile *domain.File) (v1.Descriptor, error) } -type CronManagerInterface interface { - Init() -} - type QueueManagerInterface interface { - AddAndRememberItem(cmd string) error AddTempItem(cmd string) error - AddShutdownItem(cmd string) error AddTempItemWithWait(cmd string) error GetQueue() map[string]domain.ScrollLockStatus } @@ -129,7 +155,6 @@ type ColdStarterPacketHandlerInterface interface { type ColdStarterInterface interface { Stop() - StopWithDeplay(uint) Finish(*domain.AugmentedPort) } @@ -145,6 +170,7 @@ type UiServiceInterface interface { type WatchServiceInterface interface { StartWatching(basePath string, paths ...string) error StopWatching() error + Trigger() Subscribe() chan *[]byte Unsubscribe(client chan *[]byte) GetWatchedPaths() []string diff --git a/internal/core/services/authorizer_service.go b/internal/core/services/authorizer_service.go index 541e74e3..105d5768 100644 --- a/internal/core/services/authorizer_service.go +++ b/internal/core/services/authorizer_service.go @@ -1,9 +1,14 @@ package services import ( + "crypto/rand" + "crypto/rsa" + "encoding/base64" "encoding/json" "errors" + 
"math/big" "strings" + "sync" "time" "github.com/MicahParks/keyfunc" @@ -15,11 +20,15 @@ import ( "go.uber.org/zap" ) +const queryTokenTTL = 5 * time.Minute + type AuthorizerService struct { - jwksUrl string - jwks *keyfunc.JWKS - userId string - tokens map[string]time.Time + jwksUrl string + jwks *keyfunc.JWKS + userId string + runtimeKey *rsa.PrivateKey + keyID string + mu sync.Mutex } func NewAuthorizer(jwksURL string, userId string) (ports.AuthorizerServiceInterface, error) { @@ -40,21 +49,39 @@ func NewAuthorizer(jwksURL string, userId string) (ports.AuthorizerServiceInterf return nil, err } - return &AuthorizerService{ + auth := &AuthorizerService{ jwks: jwks, jwksUrl: jwksURL, userId: userId, - tokens: make(map[string]time.Time), - }, nil + } + auth.ensureRuntimeKey() + return auth, nil } else { - return &AuthorizerService{ - tokens: make(map[string]time.Time), - }, nil + auth := &AuthorizerService{} + auth.ensureRuntimeKey() + return auth, nil } } -func (auth *AuthorizerService) CheckHeader(c *fiber.Ctx) (*time.Time, error) { +func NewRuntimeTokenVerifier(jwksURL string) (ports.AuthorizerServiceInterface, error) { + if jwksURL == "" { + return NewAuthorizer("", "") + } + options := keyfunc.Options{ + RefreshInterval: time.Hour, + RefreshErrorHandler: func(err error) { + logger.Log().Error("There was an error with the runtime jwt.KeyFunc", zap.Error(err)) + }, + } + jwks, err := keyfunc.Get(jwksURL, options) + if err != nil { + return nil, err + } + return &AuthorizerService{jwks: jwks, jwksUrl: jwksURL}, nil +} + +func (auth *AuthorizerService) CheckHeader(c *fiber.Ctx) (*ports.AuthContext, error) { if auth.jwksUrl == "" { return nil, nil @@ -102,32 +129,102 @@ func (auth *AuthorizerService) CheckHeader(c *fiber.Ctx) (*time.Time, error) { tm = time.Unix(v, 0) } - return &tm, nil + subject, _ := claims["sub"].(string) + return &ports.AuthContext{Subject: subject, ExpiresAt: &tm}, nil } -func (auth *AuthorizerService) CheckQuery(token string) (*time.Time, 
error) { - if validUntil, ok := auth.tokens[token]; ok { - defer delete(auth.tokens, token) - if validUntil.After(time.Now()) { - return &validUntil, nil +func (auth *AuthorizerService) CheckQuery(runtimeID string, tokenString string) (*ports.AuthContext, error) { + if tokenString == "" { + return nil, errors.New("missing token") + } + keyFunc := func(token *jwt.Token) (any, error) { + if auth.runtimeKey != nil { + return &auth.runtimeKey.PublicKey, nil } + if auth.jwks != nil { + return auth.jwks.Keyfunc(token) + } + return nil, errors.New("runtime token verifier is not configured") } - return nil, errors.New("no valid token found") - + token, err := jwt.Parse(tokenString, keyFunc) + if err != nil || !token.Valid { + return nil, errors.New("invalid token") + } + claims, _ := token.Claims.(jwt.MapClaims) + if claims == nil { + return nil, errors.New("couldn't parse claims") + } + if expected, _ := claims["runtime_id"].(string); runtimeID != "" && expected != runtimeID { + return nil, errors.New("runtime token does not match runtime") + } + expires, ok := claimTime(claims["exp"]) + if !ok || time.Now().After(expires) { + return nil, errors.New("runtime token expired") + } + subject, _ := claims["sub"].(string) + claimRuntimeID, _ := claims["runtime_id"].(string) + return &ports.AuthContext{Subject: subject, RuntimeID: claimRuntimeID, ExpiresAt: &expires}, nil } -func (auth *AuthorizerService) GenerateQueryToken() string { - - token, _ := utils.GenerateRandomStringURLSafe(16) +func (auth *AuthorizerService) GenerateQueryToken(runtimeID string, ownerID string) string { + auth.ensureRuntimeKey() + expires := time.Now().Add(queryTokenTTL) + claims := jwt.MapClaims{ + "sub": ownerID, + "runtime_id": runtimeID, + "scope": "runtime", + "exp": expires.Unix(), + "iat": time.Now().Unix(), + } + token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims) + token.Header["kid"] = auth.keyID + signed, err := token.SignedString(auth.runtimeKey) + if err != nil { + 
logger.Log().Error("failed to sign runtime query token", zap.Error(err)) + return "" + } + return signed +} - //TODO: it is not required to save the expire date in the map bcs of the cleanup below - auth.tokens[token] = time.Now().Add(time.Minute * 5) // TODO: configuration +func (auth *AuthorizerService) JWKS() map[string]any { + auth.ensureRuntimeKey() + pub := auth.runtimeKey.PublicKey + return map[string]any{ + "keys": []map[string]any{{ + "kty": "RSA", + "use": "sig", + "kid": auth.keyID, + "alg": "RS256", + "n": base64.RawURLEncoding.EncodeToString(pub.N.Bytes()), + "e": base64.RawURLEncoding.EncodeToString(big.NewInt(int64(pub.E)).Bytes()), + }}, + } +} - t := time.NewTimer(time.Minute * 5) - go func() { - <-t.C - delete(auth.tokens, token) - }() +func (auth *AuthorizerService) ensureRuntimeKey() { + auth.mu.Lock() + defer auth.mu.Unlock() + if auth.runtimeKey != nil { + return + } + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + logger.Log().Error("failed to generate runtime token key", zap.Error(err)) + return + } + keyID, _ := utils.GenerateRandomStringURLSafe(12) + auth.runtimeKey = key + auth.keyID = keyID +} - return token +func claimTime(value any) (time.Time, bool) { + switch typed := value.(type) { + case float64: + return time.Unix(int64(typed), 0), true + case json.Number: + v, _ := typed.Int64() + return time.Unix(v, 0), true + default: + return time.Time{}, false + } } diff --git a/internal/core/services/coldstarter.go b/internal/core/services/coldstarter.go index bf7265ed..89e3151f 100644 --- a/internal/core/services/coldstarter.go +++ b/internal/core/services/coldstarter.go @@ -2,8 +2,8 @@ package services import ( "context" - "fmt" "path/filepath" + "strings" "sync" "time" @@ -16,20 +16,18 @@ import ( ) type ColdStarter struct { - handler map[string]ports.ColdStarterServerInterface - finishCount uint - dir string - finishTime *time.Time - portsService ports.PortServiceInterface - finishChan chan *domain.AugmentedPort - 
chandlers []ports.ColdStarterHandlerInterface - queueManager ports.QueueManagerInterface - handlerMu sync.Mutex - progress *domain.SnapshotProgress - OnBeforeFinish func(progress *domain.SnapshotProgress) // optional hook called before signaling finish + handler map[string]ports.ColdStarterServerInterface + dir string + finishTime *time.Time + finishOnce sync.Once + portsService ports.PortServiceInterface + finishChan chan *domain.AugmentedPort + chandlers []ports.ColdStarterHandlerInterface + queueManager ports.QueueManagerInterface + handlerMu sync.Mutex + progress *domain.SnapshotProgress } -// NewColdStarter initializes the ColdStarter struct with proper channel initialization and no initial finishTime. func NewColdStarter( portsService ports.PortServiceInterface, queueManager ports.QueueManagerInterface, @@ -37,7 +35,6 @@ func NewColdStarter( ) *ColdStarter { return &ColdStarter{ handler: make(map[string]ports.ColdStarterServerInterface), - finishCount: 0, dir: dir, finishTime: nil, portsService: portsService, @@ -49,9 +46,7 @@ func NewColdStarter( } } -// Start initializes the finishChan channel and begins serving in a separate goroutine. func (c *ColdStarter) Start(ctx context.Context) chan *domain.AugmentedPort { - // Ensure finishChan is properly initialized. c.finishChan = make(chan *domain.AugmentedPort) go c.Serve(ctx) @@ -59,17 +54,6 @@ func (c *ColdStarter) Start(ctx context.Context) chan *domain.AugmentedPort { return c.finishChan } -// FinishCount returns the current count of finished ports. -func (c *ColdStarter) FinishCount() uint { - return c.finishCount -} - -// GetProgress returns the snapshot progress tracker used by the coldstarter. -func (c *ColdStarter) GetProgress() *domain.SnapshotProgress { - return c.progress -} - -// Serve starts the servers for each port and listens for context cancellation or errors. 
func (c *ColdStarter) Serve(ctx context.Context) { augmentedPorts := c.portsService.GetPorts() @@ -82,71 +66,67 @@ func (c *ColdStarter) Serve(ctx context.Context) { c.handler = make(map[string]ports.ColdStarterServerInterface, len(augmentedPorts)) for _, port := range augmentedPorts { + port := port var sleepHandler string if port.SleepHandler == nil { - logger.Log().Warn(fmt.Sprintf("No sleep handler found for port %d, skipping", port.Port.Port)) + logger.Log().Warn("Skipping coldstarter port without sleep handler", zap.Int("port", port.Port.Port), zap.String("port_name", port.Name)) continue - } else { - sleepHandler = *port.SleepHandler } + sleepHandler = *port.SleepHandler - path := filepath.Join(c.dir, domain.ScrollDataDir, "coldstart", sleepHandler) - - go func(port *domain.AugmentedPort) { - var handler ports.ColdStarterHandlerInterface - - if sleepHandler == "generic" { - handler = lua.NewGenericReturnHandler() - } else { - vars := make(map[string]string, len(port.Vars)) - for _, v := range port.Vars { - vars[v.Name] = v.Value - } - handler = lua.NewLuaHandler(c.queueManager, path, c.dir, vars, augmentedPortMap, c.progress) + var handler ports.ColdStarterHandlerInterface + if sleepHandler == "generic" { + handler = lua.NewGenericReturnHandler() + } else { + path := filepath.Join(c.dir, filepath.Clean(sleepHandler)) + if rel, err := filepath.Rel(c.dir, path); err != nil || rel == ".." || filepath.IsAbs(rel) || strings.HasPrefix(rel, "../") { + logger.Log().Error("Invalid coldstarter handler path", zap.String("sleep_handler", sleepHandler)) + continue + } + vars := make(map[string]string, len(port.Vars)) + for _, v := range port.Vars { + vars[v.Name] = v.Value } + handler = lua.NewLuaHandler(c.queueManager, path, c.dir, vars, augmentedPortMap, c.progress) + } - c.chandlers = append(c.chandlers, handler) + c.chandlers = append(c.chandlers, handler) - // Use the Finish method to handle sending to finishChan. 
- finishFunc := func() { - c.Finish(port) - } + finishFunc := func() { + c.Finish(port) + } - if port.Protocol == "udp" { - logger.Log().Info(fmt.Sprintf("Starting UDP server on port %d", port.Port.Port), zap.String("sleep_handler", sleepHandler), zap.String("port_name", port.Name)) - udpServer := servers.NewUDP(handler) - err := udpServer.Start(port.Port.Port, finishFunc) - if err != nil { - return - } - c.handlerMu.Lock() - defer c.handlerMu.Unlock() - c.handler[port.Name] = udpServer - } else if port.Protocol == "tcp" || port.Protocol == "http" || port.Protocol == "https" || port.Protocol == "" { - logger.Log().Info(fmt.Sprintf("Starting TCP server on port %d", port.Port.Port)) - tcpServer := servers.NewTCP(handler) - err := tcpServer.Start(port.Port.Port, finishFunc) - if err != nil { - return - } - c.handlerMu.Lock() - defer c.handlerMu.Unlock() - c.handler[port.Name] = tcpServer - } else { - return + var server ports.ColdStarterServerInterface + switch port.Protocol { + case "udp": + logger.Log().Info("Starting UDP coldstarter", zap.Int("port", port.Port.Port), zap.String("sleep_handler", sleepHandler), zap.String("port_name", port.Name)) + server = servers.NewUDP(handler) + case "tcp", "http", "https", "": + logger.Log().Info("Starting TCP coldstarter", zap.Int("port", port.Port.Port), zap.String("sleep_handler", sleepHandler), zap.String("port_name", port.Name)) + server = servers.NewTCP(handler) + default: + logger.Log().Warn("Unsupported coldstarter protocol", zap.String("protocol", port.Protocol), zap.String("port_name", port.Name)) + continue + } + if err := server.Start(port.Port.Port, finishFunc); err != nil { + logger.Log().Error("Failed to start coldstarter listener", zap.Error(err), zap.String("port_name", port.Name), zap.Int("port", port.Port.Port)) + continue + } + c.handlerMu.Lock() + c.handler[port.Name] = server + c.handlerMu.Unlock() + + srv := server + go func() { + <-ctx.Done() + if err := srv.Close(); err != nil { + 
logger.Log().Warn("Failed to close coldstarter listener", zap.Error(err)) } - }(port) + }() } } -func (c *ColdStarter) StopWithDeplay(startDelay uint) { - logger.Log().Info("Stopping ColdStarter with deplay", zap.Uint("startDelay", startDelay)) - time.Sleep(time.Duration(startDelay) * time.Second) - c.Stop() -} - -// Stop sends a nil error to the serveDone channel to gracefully stop the Serve function. func (c *ColdStarter) Stop() { logger.Log().Info("Stopping ColdStarter") @@ -158,29 +138,18 @@ func (c *ColdStarter) Stop() { } } -// Finish increments the finishCount, logs, and sends the port to the finishChan channel. -// Before signaling finish it runs the optional OnBeforeFinish hook (set by cmd/serve) -// which pulls OCI data layers when the .data-loaded marker is absent. func (c *ColdStarter) Finish(port *domain.AugmentedPort) { - if c.finishTime == nil { + c.finishOnce.Do(func() { now := time.Now() c.finishTime = &now - for _, handler := range c.chandlers { handler.SetFinishedAt(c.finishTime) } - } - if port == nil { - logger.Log().Info("Received finish signal without port") - } else { - logger.Log().Info(fmt.Sprintf("Server on port %d received finish signal", port.Port.Port)) - } - - // Run optional before-finish hook (e.g. 
pull data from registry) - if c.OnBeforeFinish != nil { - c.OnBeforeFinish(c.progress) - } - - c.finishChan <- port - c.finishCount++ + if port == nil { + logger.Log().Info("Received coldstarter finish signal") + } else { + logger.Log().Info("Coldstarter port finished", zap.Int("port", port.Port.Port), zap.String("port_name", port.Name)) + } + c.finishChan <- port + }) } diff --git a/internal/core/services/coldstarter/handler/lua_handler.go b/internal/core/services/coldstarter/handler/lua_handler.go index 7bd60691..03b8695c 100644 --- a/internal/core/services/coldstarter/handler/lua_handler.go +++ b/internal/core/services/coldstarter/handler/lua_handler.go @@ -197,7 +197,7 @@ func (handler *LuaHandler) GetHandler(funcs map[string]func(data ...string)) (po if handler.progress != nil { l.Push(lua.LString(handler.progress.Mode.Load().(string))) } else { - l.Push(lua.LString("noop")) + l.Push(lua.LString(domain.SnapshotProgressModeIdle)) } return 1 }, diff --git a/internal/core/services/cron_manager.go b/internal/core/services/cron_manager.go deleted file mode 100644 index f1b0dd59..00000000 --- a/internal/core/services/cron_manager.go +++ /dev/null @@ -1,50 +0,0 @@ -package services - -import ( - "time" - - "github.com/go-co-op/gocron" - "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/core/ports" - "github.com/highcard-dev/daemon/internal/utils/logger" - "go.uber.org/zap" -) - -type CronManager struct { - crons []*domain.Cronjob - queueManager ports.QueueManagerInterface -} - -func NewCronManager(cronjobs []*domain.Cronjob, queueManager ports.QueueManagerInterface) *CronManager { - return &CronManager{ - crons: cronjobs, - queueManager: queueManager, - } -} - -func (c *CronManager) Init() error { - scheduler := gocron.NewScheduler(time.UTC) - for _, cron := range c.crons { - //gocron - _, err := scheduler.Cron(cron.Schedule).Do(func() { - logger.Log().Info("Cronjob started", zap.String("name", cron.Name)) - - //run 
cron.Command e.g. main.start - - err := c.queueManager.AddTempItem(cron.Command) - - if err != nil { - logger.Log().Error("error running cronjob", zap.String("name", cron.Name), zap.Error(err)) - } else { - logger.Log().Info("Cronjob finished", zap.String("name", cron.Name)) - } - }) - if err != nil { - return err - } - - } - scheduler.StartAsync() - return nil - -} diff --git a/internal/core/services/procedure_launcher.go b/internal/core/services/procedure_launcher.go index 90807818..68048ff6 100644 --- a/internal/core/services/procedure_launcher.go +++ b/internal/core/services/procedure_launcher.go @@ -12,7 +12,7 @@ import ( type ProcedureLauncher struct { runtimeBackend ports.RuntimeBackendInterface - runtimeDataRoot string + runtimeRoot string runtimeScrollID string runtimeScrollName string routingProvider func() []domain.RuntimeRouteAssignment @@ -24,24 +24,24 @@ type ProcedureLauncher struct { func NewProcedureLauncher( scrollService ports.ScrollServiceInterface, runtimeBackend ports.RuntimeBackendInterface, - runtimeDataRoot string, + runtimeRoot string, ) (*ProcedureLauncher, error) { - return NewProcedureLauncherForScroll(scrollService, runtimeBackend, runtimeDataRoot, "") + return NewProcedureLauncherForScroll(scrollService, runtimeBackend, runtimeRoot, "") } func NewProcedureLauncherForScroll( scrollService ports.ScrollServiceInterface, runtimeBackend ports.RuntimeBackendInterface, - runtimeDataRoot string, + runtimeRoot string, runtimeScrollID string, ) (*ProcedureLauncher, error) { - return NewProcedureLauncherForRuntime(scrollService, runtimeBackend, runtimeDataRoot, runtimeScrollID, "", nil) + return NewProcedureLauncherForRuntime(scrollService, runtimeBackend, runtimeRoot, runtimeScrollID, "", nil) } func NewProcedureLauncherForRuntime( scrollService ports.ScrollServiceInterface, runtimeBackend ports.RuntimeBackendInterface, - runtimeDataRoot string, + runtimeRoot string, runtimeScrollID string, runtimeScrollName string, routingProvider func() 
[]domain.RuntimeRouteAssignment, @@ -52,7 +52,7 @@ func NewProcedureLauncherForRuntime( s := &ProcedureLauncher{ runtimeBackend: runtimeBackend, - runtimeDataRoot: runtimeDataRoot, + runtimeRoot: runtimeRoot, runtimeScrollID: runtimeScrollID, runtimeScrollName: runtimeScrollName, routingProvider: routingProvider, @@ -88,9 +88,9 @@ func (sc *ProcedureLauncher) Run(cmd string) error { zap.String("runMode", string(command.Run)), ) - dataRoot := sc.runtimeDataRoot - if dataRoot == "" { - dataRoot = sc.scrollService.GetCwd() + root := sc.runtimeRoot + if root == "" { + root = sc.scrollService.GetCwd() } file := sc.scrollService.GetFile() routing := []domain.RuntimeRouteAssignment{} @@ -112,7 +112,7 @@ func (sc *ProcedureLauncher) Run(cmd string) error { Name: cmd, ScrollID: sc.runtimeScrollID, Command: command, - DataRoot: dataRoot, + Root: root, GlobalPorts: file.Ports, ProcedureEnv: procedureEnv, }) diff --git a/internal/core/services/procedure_launcher_test.go b/internal/core/services/procedure_launcher_test.go index 66530384..42aaa768 100644 --- a/internal/core/services/procedure_launcher_test.go +++ b/internal/core/services/procedure_launcher_test.go @@ -34,8 +34,8 @@ func TestProcedureLauncherPassesCommandContextToRuntimeBackend(t *testing.T) { if runtimeCommand.Command != command { t.Fatal("Command was not forwarded to runtime backend") } - if runtimeCommand.DataRoot != "/runtime-data" { - t.Fatalf("DataRoot = %s, want /runtime-data", runtimeCommand.DataRoot) + if runtimeCommand.Root != "/runtime-data" { + t.Fatalf("Root = %s, want /runtime-data", runtimeCommand.Root) } if len(runtimeCommand.GlobalPorts) != 1 || runtimeCommand.GlobalPorts[0].Name != "http" { t.Fatalf("GlobalPorts = %#v", runtimeCommand.GlobalPorts) @@ -173,6 +173,28 @@ func TestBuildRuntimeProcedureEnvSetsWaitBeforeRouting(t *testing.T) { } } +func TestBuildRuntimeProcedureEnvDerivesURLFromPortProtocol(t *testing.T) { + command := &domain.CommandInstructionSet{Procedures: 
[]*domain.Procedure{{Image: "alpine:3.20"}}} + envs, err := services.BuildRuntimeProcedureEnv(&domain.File{ + Name: "test", + Ports: []domain.Port{{Name: "http", Port: 8080, Protocol: "http"}}, + }, "serve", command, services.RuntimeEnvContext{ + Routing: []domain.RuntimeRouteAssignment{{ + Name: "web-http", + PortName: "http", + Host: "localhost", + ExternalIP: "127.0.0.1", + PublicPort: 18080, + }}, + }) + if err != nil { + t.Fatal(err) + } + if got := envs["serve.0"]["DRUID_PORT_HTTP_URL"]; got != "http://localhost:18080" { + t.Fatalf("DRUID_PORT_HTTP_URL = %q", got) + } +} + func TestBuildRuntimeProcedureEnvRejectsDuplicateNormalizedPortNames(t *testing.T) { _, err := services.BuildRuntimeProcedureEnv(&domain.File{ Name: "scroll-name", diff --git a/internal/core/services/queue_manager.go b/internal/core/services/queue_manager.go index 77cd5653..b8bbd1b6 100644 --- a/internal/core/services/queue_manager.go +++ b/internal/core/services/queue_manager.go @@ -17,9 +17,8 @@ var ErrCommandNotFound = fmt.Errorf("command not found") var ErrCommandDoneOnce = fmt.Errorf("command is already done and has run mode once") type AddItemOptions struct { - Wait bool - RunAfterExecution func() - Force bool + Wait bool + Force bool } type QueueStatusObserver func(command string, status domain.ScrollLockStatus, exitCode *int) @@ -34,7 +33,6 @@ type QueueManager struct { taskDoneChan chan struct{} shutdownChan chan struct{} notifierChan []chan []string - callbacksPostRun map[string]func() statusObserver QueueStatusObserver } @@ -50,7 +48,6 @@ func NewQueueManager( taskDoneChan: make(chan struct{}, 1), // FIXED: Buffered channel shutdownChan: make(chan struct{}), notifierChan: make([]chan []string, 0), - callbacksPostRun: make(map[string]func()), } } @@ -100,24 +97,6 @@ func (sc *QueueManager) AddForcedItem(cmd string) error { return sc.addQueueItem(cmd, AddItemOptions{Force: true}) } -func (sc *QueueManager) AddAndRememberItem(cmd string) error { - return sc.addQueueItem(cmd, 
AddItemOptions{}) -} - -func (sc *QueueManager) AddShutdownItem(cmd string) error { - return sc.addQueueItem(cmd, AddItemOptions{ - RunAfterExecution: func() { - sc.Shutdown() - }, - }) -} - -func (sc *QueueManager) AddItemWithCallback(cmd string, cb func()) error { - return sc.addQueueItem(cmd, AddItemOptions{ - RunAfterExecution: cb, - }) -} - func (sc *QueueManager) RememberDoneItem(cmd string) { sc.mu.Lock() defer sc.mu.Unlock() @@ -172,10 +151,6 @@ func (sc *QueueManager) addQueueItem(cmd string, options AddItemOptions) error { DoneChan: doneChan, } - if options.RunAfterExecution != nil { - item.RunAfterExecution = options.RunAfterExecution - } - sc.commandQueue[cmd] = item sc.observeStatusLocked(cmd, domain.ScrollLockStatusWaiting, nil) @@ -197,15 +172,6 @@ func (sc *QueueManager) addQueueItem(cmd string, options AddItemOptions) error { return nil } -func (sc *QueueManager) RegisterCallbacks(callbacks map[string]func()) { - sc.mu.Lock() - defer sc.mu.Unlock() - - for cmd, cb := range callbacks { - sc.callbacksPostRun[cmd] = cb - } -} - func (sc *QueueManager) SetStatusObserver(observer QueueStatusObserver) { sc.mu.Lock() defer sc.mu.Unlock() @@ -220,10 +186,6 @@ func (sc *QueueManager) HydrateCommandStatuses(statuses map[string]domain.LockSt } if status.Status == domain.ScrollLockStatusDone { - if callback, ok := sc.callbacksPostRun[cmd]; ok && callback != nil { - callback() - } - if command.Run != domain.RunModeRestart && command.Run != domain.RunModePersistent { sc.mu.Lock() sc.commandQueue[cmd] = &domain.QueueItem{ @@ -234,9 +196,7 @@ func (sc *QueueManager) HydrateCommandStatuses(statuses map[string]domain.LockSt } } - sc.addQueueItem(cmd, AddItemOptions{ - RunAfterExecution: nil, - }) + sc.addQueueItem(cmd, AddItemOptions{}) } return nil @@ -338,13 +298,6 @@ func (sc *QueueManager) RunQueue() { close(i.DoneChan) } - if i.RunAfterExecution != nil { - i.RunAfterExecution() - } - if callback, ok := sc.callbacksPostRun[c]; ok && callback != nil { - 
callback() - } - // FIXED: Non-blocking send to buffered channel sc.taskDoneChan <- struct{}{} }() diff --git a/internal/core/services/registry/credential_store.go b/internal/core/services/registry/credential_store.go index 5fb8940c..dbbbd43a 100644 --- a/internal/core/services/registry/credential_store.go +++ b/internal/core/services/registry/credential_store.go @@ -43,3 +43,9 @@ func (s *CredentialStore) CredentialForRepo(repoURL string) (auth.Credential, er func (s *CredentialStore) HasCredentials() bool { return len(s.registries) > 0 } + +func (s *CredentialStore) Credentials() []domain.RegistryCredential { + out := make([]domain.RegistryCredential, len(s.registries)) + copy(out, s.registries) + return out +} diff --git a/internal/core/services/registry/oci.go b/internal/core/services/registry/oci.go index 34c41f85..3c612027 100644 --- a/internal/core/services/registry/oci.go +++ b/internal/core/services/registry/oci.go @@ -7,6 +7,7 @@ import ( "fmt" "net/http" "os" + "path" "path/filepath" "regexp" "strconv" @@ -179,7 +180,7 @@ func (c *OciClient) PullSelective(dir string, artifact string, includeData bool, var bytesDownloaded atomic.Int64 if progress != nil { - progress.Mode.Store("restore") + progress.Mode.Store(domain.SnapshotProgressModeRestore) progress.Percentage.Store(0) } @@ -284,14 +285,14 @@ func (c *OciClient) PullSelective(dir string, artifact string, includeData bool, stopProgress() if err != nil { if progress != nil { - progress.Mode.Store("noop") + progress.Mode.Store(domain.SnapshotProgressModeIdle) } return err } if progress != nil { progress.Percentage.Store(100) - progress.Mode.Store("noop") + progress.Mode.Store(domain.SnapshotProgressModeIdle) } logger.Log().Info("Manifest pulled", zap.String("digest", manifestDescriptor.Digest.String()), zap.String("mediaType", manifestDescriptor.MediaType)) @@ -332,13 +333,114 @@ func (c *OciClient) PullSelective(dir string, artifact string, includeData bool, return nil } +func (c *OciClient) 
FetchFile(artifact string, filePath string) ([]byte, error) { + repo, ref, _ := utils.ParseArtifactRef(artifact) + if repo == "" || ref == "" { + return nil, fmt.Errorf("reference (tag or digest) must be set") + } + filePath = cleanOCIFilePath(filePath) + if filePath == "" { + return nil, fmt.Errorf("file path is required") + } + + ctx := context.Background() + repoInstance, err := c.GetRepo(repo) + if err != nil { + return nil, err + } + rootDesc, err := oras.Resolve(ctx, repoInstance, ref, oras.DefaultResolveOptions) + if err != nil { + return nil, fmt.Errorf("failed to resolve %s: %w", ref, err) + } + data, err := fetchFileFromOCI(ctx, repoInstance, rootDesc, filePath) + if err != nil { + return nil, fmt.Errorf("failed to fetch %s from %s: %w", filePath, artifact, err) + } + return data, nil +} + +func (c *OciClient) ResolveDigest(artifact string) (string, error) { + repo, ref, _ := utils.ParseArtifactRef(artifact) + if repo == "" || ref == "" { + return "", fmt.Errorf("reference (tag or digest) must be set") + } + repoInstance, err := c.GetRepo(repo) + if err != nil { + return "", err + } + desc, err := oras.Resolve(context.Background(), repoInstance, ref, oras.DefaultResolveOptions) + if err != nil { + return "", fmt.Errorf("failed to resolve %s: %w", ref, err) + } + return desc.Digest.String(), nil +} + +func fetchFileFromOCI(ctx context.Context, fetcher content.Fetcher, rootDesc v1.Descriptor, filePath string) ([]byte, error) { + seen := map[string]bool{} + queue := []v1.Descriptor{rootDesc} + if rootDesc.Digest.String() != "" { + seen[rootDesc.Digest.String()] = true + } + for len(queue) > 0 { + current := queue[0] + queue = queue[1:] + if !descriptorCanHaveChildren(current) { + continue + } + successors, err := content.Successors(ctx, fetcher, current) + if err != nil { + return nil, err + } + for _, desc := range successors { + if descriptorMatchesPath(desc, filePath) { + return content.FetchAll(ctx, fetcher, desc) + } + key := desc.Digest.String() + if 
key == "" || seen[key] || !descriptorCanHaveChildren(desc) { + continue + } + seen[key] = true + queue = append(queue, desc) + } + } + return nil, fmt.Errorf("%s not found in artifact", filePath) +} + +func descriptorMatchesPath(desc v1.Descriptor, want string) bool { + for _, key := range []string{"org.opencontainers.image.path", "org.opencontainers.image.title"} { + if cleanOCIFilePath(desc.Annotations[key]) == want { + return true + } + } + return false +} + +func descriptorCanHaveChildren(desc v1.Descriptor) bool { + mediaType := strings.TrimSuffix(desc.MediaType, "+gzip") + return mediaType == v1.MediaTypeImageManifest || + mediaType == v1.MediaTypeImageIndex || + strings.Contains(mediaType, "manifest") || + strings.Contains(mediaType, "index") +} + +func cleanOCIFilePath(filePath string) string { + filePath = filepath.ToSlash(strings.TrimSpace(filePath)) + filePath = strings.TrimLeft(filePath, "/") + filePath = strings.TrimPrefix(filePath, "./") + filePath = path.Clean(filePath) + if filePath == "." 
{ + return "" + } + return filePath +} + func (c *OciClient) CanUpdateTag(current v1.Descriptor, r string, tag string) (bool, error) { repo, err := c.GetRepo(r) if err != nil { return false, err } - disc, err := oras.Resolve(context.TODO(), repo, tag, oras.DefaultResolveOptions) + disc, err := oras.Resolve(context.Background(), repo, tag, oras.DefaultResolveOptions) if err != nil { return false, err } @@ -715,7 +817,7 @@ func (c *OciClient) Push(folder string, repo string, tag string, overrides map[s annotations[k] = v } - rootManifestDescriptor, err := oras.PackManifest(ctx, fs, oras.PackManifestVersion1_1, string(domain.ArtifactTypeScrollRoot), oras.PackManifestOptions{ + rootManifestDescriptor, err := oras.PackManifest(ctx, fs, oras.PackManifestVersion1_1, string(domain.ArtifactTypeRuntimeRoot), oras.PackManifestOptions{ Layers: descriptorsForRoot, ManifestAnnotations: annotations, }) @@ -763,7 +865,7 @@ func (c *OciClient) PushCategory(dir string, repo string, category string) (v1.D return v1.Descriptor{}, err } - rootManifestDescriptor, err := oras.PackManifest(ctx, fs, oras.PackManifestVersion1_1, string(domain.ArtifactTypeScrollRoot), oras.PackManifestOptions{ + rootManifestDescriptor, err := oras.PackManifest(ctx, fs, oras.PackManifestVersion1_1, string(domain.ArtifactTypeRuntimeRoot), oras.PackManifestOptions{ Layers: manifestDescriptors, }) if err != nil { diff --git a/internal/core/services/registry/oci_test.go b/internal/core/services/registry/oci_test.go index b5ea3a1a..c6a5c77a 100644 --- a/internal/core/services/registry/oci_test.go +++ b/internal/core/services/registry/oci_test.go @@ -44,6 +44,7 @@ func fakeRegistry(t *testing.T) *httptest.Server { ref := strings.Split(r.URL.Path, "/manifests/")[1] if data, ok := manifests[ref]; ok { w.Header().Set("Content-Type", manifestTypes[ref]) + w.Header().Set("Docker-Content-Digest", ocidigest.FromBytes(data).String()) w.WriteHeader(http.StatusOK) w.Write(data) return @@ -71,6 +72,7 @@ func fakeRegistry(t 
*testing.T) *httptest.Server { if data, ok := manifests[ref]; ok { w.Header().Set("Content-Length", fmt.Sprintf("%d", len(data))) w.Header().Set("Content-Type", manifestTypes[ref]) + w.Header().Set("Docker-Content-Digest", ocidigest.FromBytes(data).String()) w.WriteHeader(http.StatusOK) return } @@ -217,3 +219,40 @@ func TestPushPullExecutableDataChunkPreservesMode(t *testing.T) { t.Fatalf("data/arkserver mode = %v, want 0755", got) } } + +func TestFetchFileReadsScrollYAMLDescriptor(t *testing.T) { + tmpDir := t.TempDir() + t.Chdir(tmpDir) + + srv := fakeRegistry(t) + registryHost := strings.TrimPrefix(srv.URL, "http://") + + folder := filepath.Join("scrolls", "fetch-file") + if err := os.MkdirAll(folder, 0755); err != nil { + t.Fatal(err) + } + scrollYAML := []byte("name: test\nversion: 0.1.0\napp_version: \"1.0\"\n") + if err := os.WriteFile(filepath.Join(folder, "scroll.yaml"), scrollYAML, 0644); err != nil { + t.Fatal(err) + } + + client := &OciClient{ + credentialStore: NewCredentialStore([]domain.RegistryCredential{}), + plainHTTP: true, + } + repoRef := registryHost + "/test/fetch-file" + if _, err := client.Push(folder, repoRef, "1.0", map[string]string{}, false, nil); err != nil { + t.Fatalf("Push failed unexpectedly: %v", err) + } + + got, err := client.FetchFile(repoRef+":1.0", "./scroll.yaml") + if err != nil { + t.Fatal(err) + } + if string(got) != string(scrollYAML) { + t.Fatalf("scroll.yaml = %q, want %q", got, scrollYAML) + } + if _, err := client.FetchFile(repoRef+":1.0", "missing.txt"); err == nil || !strings.Contains(err.Error(), "missing.txt not found") { + t.Fatalf("missing error = %v, want clear not found", err) + } +} diff --git a/internal/core/services/runtime_env.go b/internal/core/services/runtime_env.go index f0c38989..89d39504 100644 --- a/internal/core/services/runtime_env.go +++ b/internal/core/services/runtime_env.go @@ -60,6 +60,7 @@ func runtimeEnv(file *domain.File, context RuntimeEnvContext) (map[string]string } seen := 
map[string]string{} + portProtocols := map[string]string{} for _, port := range file.Ports { suffix := envSuffix(port.Name) if suffix == "" { @@ -73,6 +74,7 @@ func runtimeEnv(file *domain.File, context RuntimeEnvContext) (map[string]string env["DRUID_PORT_"+suffix+"_1"] = strconv.Itoa(port.Port) if port.Protocol != "" { env["DRUID_PORT_"+suffix+"_PROTOCOL"] = port.Protocol + portProtocols[suffix] = port.Protocol } } @@ -103,6 +105,14 @@ func runtimeEnv(file *domain.File, context RuntimeEnvContext) (map[string]string } if assignment.URL != "" { env["DRUID_PORT_"+suffix+"_URL"] = assignment.URL + } else if assignment.Host != "" && assignment.PublicPort > 0 { + protocol := assignment.Protocol + if portProtocols[suffix] == "http" || portProtocols[suffix] == "https" { + protocol = portProtocols[suffix] + } + if protocol == "http" || protocol == "https" { + env["DRUID_PORT_"+suffix+"_URL"] = fmt.Sprintf("%s://%s:%d", protocol, assignment.Host, assignment.PublicPort) + } } } return env, nil diff --git a/internal/core/services/runtime_scroll_manager.go b/internal/core/services/runtime_scroll_manager.go index 7850ab0e..4f2342e4 100644 --- a/internal/core/services/runtime_scroll_manager.go +++ b/internal/core/services/runtime_scroll_manager.go @@ -24,20 +24,21 @@ func NewRuntimeScrollManager(store RuntimeScrollStore) *RuntimeScrollManager { return &RuntimeScrollManager{store: store} } -func (m *RuntimeScrollManager) Create(artifact string, requestedName string, scrollRoot string, dataRoot string, scrollYAML []byte) (*domain.RuntimeScroll, error) { +func (m *RuntimeScrollManager) Create(artifact string, requestedName string, root string, scrollYAML []byte) (*domain.RuntimeScroll, error) { + return m.CreateWithDigest(artifact, "", requestedName, "", root, scrollYAML) +} + +func (m *RuntimeScrollManager) CreateWithDigest(artifact string, artifactDigest string, requestedName string, ownerID string, root string, scrollYAML []byte) (*domain.RuntimeScroll, error) { if artifact == 
"" { return nil, fmt.Errorf("artifact is required") } - if scrollRoot == "" { - return nil, fmt.Errorf("scroll root is required") - } - if dataRoot == "" { - return nil, fmt.Errorf("data root is required") + if root == "" { + return nil, fmt.Errorf("runtime root is required") } if len(scrollYAML) == 0 { return nil, fmt.Errorf("scroll yaml is required") } - scroll, err := domain.NewScrollFromBytes(scrollRoot, scrollYAML) + scroll, err := domain.NewScrollFromBytes(root, scrollYAML) if err != nil { return nil, err } @@ -55,14 +56,15 @@ func (m *RuntimeScrollManager) Create(artifact string, requestedName string, scr } runtimeScroll := &domain.RuntimeScroll{ - ID: id, - Artifact: artifact, - ScrollRoot: scrollRoot, - DataRoot: dataRoot, - ScrollName: scroll.Name, - ScrollYAML: string(scrollYAML), - Status: domain.RuntimeScrollStatusCreated, - Commands: map[string]domain.LockStatus{}, + ID: id, + OwnerID: ownerID, + Artifact: artifact, + ArtifactDigest: artifactDigest, + Root: root, + ScrollName: scroll.Name, + ScrollYAML: string(scrollYAML), + Status: domain.RuntimeScrollStatusCreated, + Commands: map[string]domain.LockStatus{}, } if err := m.store.CreateScroll(runtimeScroll); err != nil { return nil, err @@ -101,120 +103,56 @@ func RuntimeScrollIDFromName(name string) string { return name } -func MaterializeScrollArtifact(artifact string, scrollRoot string, dataRoot string, ociRegistry ports.OciRegistryInterface, includeData bool) error { +func MaterializeScrollArtifact(artifact string, root string, ociRegistry ports.OciRegistryInterface, includeData bool) error { if artifact == "" { return fmt.Errorf("artifact is required") } - if scrollRoot == "" { - return fmt.Errorf("scroll root is required") - } - if dataRoot == "" { - return fmt.Errorf("data root is required") + if root == "" { + return fmt.Errorf("runtime root is required") } - if err := os.RemoveAll(scrollRoot); err != nil { + if err := os.RemoveAll(root); err != nil { return err } - if err := 
os.MkdirAll(scrollRoot, 0755); err != nil { + if err := os.MkdirAll(root, 0755); err != nil { return err } - if err := os.MkdirAll(filepath.Join(dataRoot, domain.RuntimeDataDir), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(root, domain.RuntimeDataDir), 0755); err != nil { return err } if localPathExists(artifact) { - if err := materializeLocalArtifact(artifact, scrollRoot); err != nil { + if err := materializeLocalArtifact(artifact, root); err != nil { return err } - if scrollRoot == dataRoot { - return os.MkdirAll(filepath.Join(dataRoot, domain.RuntimeDataDir), 0755) - } - return moveRuntimeData(scrollRoot, dataRoot) + return os.MkdirAll(filepath.Join(root, domain.RuntimeDataDir), 0755) } if ociRegistry == nil { return fmt.Errorf("OCI registry is required to pull %s", artifact) } - if err := ociRegistry.PullSelective(scrollRoot, artifact, includeData, nil); err != nil { + if err := ociRegistry.PullSelective(root, artifact, includeData, nil); err != nil { return err } - if includeData { - if scrollRoot == dataRoot { - return os.MkdirAll(filepath.Join(dataRoot, domain.RuntimeDataDir), 0755) - } - return moveRuntimeData(scrollRoot, dataRoot) - } - return os.MkdirAll(filepath.Join(dataRoot, domain.RuntimeDataDir), 0755) + return os.MkdirAll(filepath.Join(root, domain.RuntimeDataDir), 0755) } -func moveRuntimeData(scrollRoot string, dataRoot string) error { - src := filepath.Join(scrollRoot, domain.RuntimeDataDir) - if !localPathExists(src) { - return os.MkdirAll(filepath.Join(dataRoot, domain.RuntimeDataDir), 0755) - } - dst := filepath.Join(dataRoot, domain.RuntimeDataDir) - if err := os.RemoveAll(dst); err != nil { - return err - } - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { - return err +func MoveMaterializedScroll(srcRoot string, dstRoot string) error { + if localPathExists(dstRoot) { + return fmt.Errorf("target runtime root already exists: %s", dstRoot) } - if err := os.Rename(src, dst); err == nil { - return nil - } - if err := 
copyDir(src, dst); err != nil { + if err := os.MkdirAll(filepath.Dir(dstRoot), 0755); err != nil { return err } - return os.RemoveAll(src) -} - -func MoveMaterializedScroll(srcScrollRoot string, srcDataRoot string, dstScrollRoot string, dstDataRoot string) error { - if srcScrollRoot == srcDataRoot && dstScrollRoot == dstDataRoot { - if localPathExists(dstScrollRoot) { - return fmt.Errorf("target scroll root already exists: %s", dstScrollRoot) - } - if err := os.MkdirAll(filepath.Dir(dstScrollRoot), 0755); err != nil { - return err - } - if err := os.Rename(srcScrollRoot, dstScrollRoot); err != nil { - if err := copyDir(srcScrollRoot, dstScrollRoot); err != nil { - return err - } - if err := os.RemoveAll(srcScrollRoot); err != nil { - return err - } - } - return nil - } - if localPathExists(dstScrollRoot) { - return fmt.Errorf("target scroll root already exists: %s", dstScrollRoot) - } - if localPathExists(dstDataRoot) { - return fmt.Errorf("target data root already exists: %s", dstDataRoot) - } - if err := os.MkdirAll(filepath.Dir(dstScrollRoot), 0755); err != nil { - return err - } - if err := os.MkdirAll(filepath.Dir(dstDataRoot), 0755); err != nil { - return err - } - if err := os.Rename(srcScrollRoot, dstScrollRoot); err != nil { - if err := copyDir(srcScrollRoot, dstScrollRoot); err != nil { - return err - } - if err := os.RemoveAll(srcScrollRoot); err != nil { - return err - } - } - if err := os.Rename(srcDataRoot, dstDataRoot); err != nil { - if err := copyDir(srcDataRoot, dstDataRoot); err != nil { + if err := os.Rename(srcRoot, dstRoot); err != nil { + if err := copyDir(srcRoot, dstRoot); err != nil { return err } - if err := os.RemoveAll(srcDataRoot); err != nil { + if err := os.RemoveAll(srcRoot); err != nil { return err } } return nil } -func materializeLocalArtifact(artifact string, scrollRoot string) error { +func materializeLocalArtifact(artifact string, root string) error { info, err := os.Stat(artifact) if err != nil { return err @@ -223,9 +161,9 
@@ func materializeLocalArtifact(artifact string, scrollRoot string) error { if filepath.Base(artifact) != "scroll.yaml" { return fmt.Errorf("local file artifact must be scroll.yaml") } - return copyFile(artifact, filepath.Join(scrollRoot, "scroll.yaml")) + return copyFile(artifact, filepath.Join(root, "scroll.yaml")) } - return copyDir(artifact, scrollRoot) + return copyDir(artifact, root) } func copyDir(src string, dst string) error { diff --git a/internal/core/services/runtime_scroll_manager_test.go b/internal/core/services/runtime_scroll_manager_test.go index 18aa725f..933ee7da 100644 --- a/internal/core/services/runtime_scroll_manager_test.go +++ b/internal/core/services/runtime_scroll_manager_test.go @@ -46,10 +46,10 @@ func TestRuntimeScrollManagerCreateFailsDuplicateID(t *testing.T) { store := NewRuntimeStateStore(t.TempDir()) manager := NewRuntimeScrollManager(store) - if _, err := manager.Create("artifact", "", t.TempDir(), filepath.Join(t.TempDir(), "data"), []byte(testScrollYAML)); err != nil { + if _, err := manager.Create("artifact", "", t.TempDir(), []byte(testScrollYAML)); err != nil { t.Fatal(err) } - _, err := manager.Create("artifact", "", t.TempDir(), filepath.Join(t.TempDir(), "data"), []byte(testScrollYAML)) + _, err := manager.Create("artifact", "", t.TempDir(), []byte(testScrollYAML)) if !errors.Is(err, ErrScrollAlreadyExists) { t.Fatalf("error = %v, want ErrScrollAlreadyExists", err) } @@ -57,8 +57,8 @@ func TestRuntimeScrollManagerCreateFailsDuplicateID(t *testing.T) { func TestRuntimeStateStoreUsesSingleRuntimeRoot(t *testing.T) { store := NewRuntimeStateStore(t.TempDir()) - if got, want := store.DataRoot("scroll-a"), store.ScrollRoot("scroll-a"); got != want { - t.Fatalf("DataRoot = %s, want %s", got, want) + if got, want := store.Root("scroll-a"), filepath.Join(store.StateDir(), "scrolls", "scroll-a"); got != want { + t.Fatalf("Root = %s, want %s", got, want) } } @@ -75,7 +75,7 @@ func 
TestMaterializeScrollArtifactKeepsScrollYamlNextToData(t *testing.T) { } root := t.TempDir() - if err := MaterializeScrollArtifact(artifact, root, root, nil, true); err != nil { + if err := MaterializeScrollArtifact(artifact, root, nil, true); err != nil { t.Fatal(err) } diff --git a/internal/core/services/runtime_state_store.go b/internal/core/services/runtime_state_store.go index d690ede0..fe5db9b6 100644 --- a/internal/core/services/runtime_state_store.go +++ b/internal/core/services/runtime_state_store.go @@ -22,8 +22,7 @@ type RuntimeStateStore struct { type RuntimeScrollStore interface { StateDir() string - ScrollRoot(id string) string - DataRoot(id string) string + Root(id string) string CreateScroll(scroll *domain.RuntimeScroll) error ListScrolls() ([]*domain.RuntimeScroll, error) GetScroll(id string) (*domain.RuntimeScroll, error) @@ -42,14 +41,10 @@ func (s *RuntimeStateStore) StateDir() string { return s.stateDir } -func (s *RuntimeStateStore) ScrollRoot(id string) string { +func (s *RuntimeStateStore) Root(id string) string { return filepath.Join(s.stateDir, "scrolls", id) } -func (s *RuntimeStateStore) DataRoot(id string) string { - return s.ScrollRoot(id) -} - func (s *RuntimeStateStore) CreateScroll(scroll *domain.RuntimeScroll) error { db, err := s.open() if err != nil { @@ -76,9 +71,9 @@ func (s *RuntimeStateStore) CreateScroll(scroll *domain.RuntimeScroll) error { } _, err = db.Exec(` - INSERT INTO scrolls (id, owner_id, artifact, scroll_root, data_root, scroll_name, scroll_yaml, status, last_error, created_at, updated_at, commands_json, routing_json) + INSERT INTO scrolls (id, owner_id, artifact, artifact_digest, root, scroll_name, scroll_yaml, status, last_error, created_at, updated_at, commands_json, routing_json) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
- `, scroll.ID, scroll.OwnerID, scroll.Artifact, scroll.ScrollRoot, scroll.DataRoot, scroll.ScrollName, scroll.ScrollYAML, scroll.Status, scroll.LastError, formatTime(scroll.CreatedAt), formatTime(scroll.UpdatedAt), string(commands), string(routing)) + `, scroll.ID, scroll.OwnerID, scroll.Artifact, scroll.ArtifactDigest, scroll.Root, scroll.ScrollName, scroll.ScrollYAML, scroll.Status, scroll.LastError, formatTime(scroll.CreatedAt), formatTime(scroll.UpdatedAt), string(commands), string(routing)) if err != nil { return fmt.Errorf("create runtime scroll %s: %w", scroll.ID, err) } @@ -93,7 +88,7 @@ func (s *RuntimeStateStore) ListScrolls() ([]*domain.RuntimeScroll, error) { defer db.Close() rows, err := db.Query(` - SELECT id, owner_id, artifact, scroll_root, data_root, scroll_name, scroll_yaml, status, last_error, created_at, updated_at, commands_json, routing_json + SELECT id, owner_id, artifact, artifact_digest, root, scroll_name, scroll_yaml, status, last_error, created_at, updated_at, commands_json, routing_json FROM scrolls ORDER BY id `) @@ -121,7 +116,7 @@ func (s *RuntimeStateStore) GetScroll(id string) (*domain.RuntimeScroll, error) defer db.Close() row := db.QueryRow(` - SELECT id, owner_id, artifact, scroll_root, data_root, scroll_name, scroll_yaml, status, last_error, created_at, updated_at, commands_json, routing_json + SELECT id, owner_id, artifact, artifact_digest, root, scroll_name, scroll_yaml, status, last_error, created_at, updated_at, commands_json, routing_json FROM scrolls WHERE id = ? `, id) @@ -150,9 +145,9 @@ func (s *RuntimeStateStore) UpdateScroll(scroll *domain.RuntimeScroll) error { } res, err := db.Exec(` UPDATE scrolls - SET owner_id = ?, artifact = ?, scroll_root = ?, data_root = ?, scroll_name = ?, scroll_yaml = ?, status = ?, last_error = ?, updated_at = ?, commands_json = ?, routing_json = ? 
+ SET owner_id = ?, artifact = ?, artifact_digest = ?, root = ?, scroll_name = ?, scroll_yaml = ?, status = ?, last_error = ?, updated_at = ?, commands_json = ?, routing_json = ? WHERE id = ? - `, scroll.OwnerID, scroll.Artifact, scroll.ScrollRoot, scroll.DataRoot, scroll.ScrollName, scroll.ScrollYAML, scroll.Status, scroll.LastError, formatTime(scroll.UpdatedAt), string(commands), string(routing), scroll.ID) + `, scroll.OwnerID, scroll.Artifact, scroll.ArtifactDigest, scroll.Root, scroll.ScrollName, scroll.ScrollYAML, scroll.Status, scroll.LastError, formatTime(scroll.UpdatedAt), string(commands), string(routing), scroll.ID) if err != nil { return err } @@ -195,6 +190,10 @@ func (s *RuntimeStateStore) open() (*sql.DB, error) { if err != nil { return nil, err } + if _, err := db.Exec(`PRAGMA busy_timeout = 10000`); err != nil { + db.Close() + return nil, err + } if _, err := db.Exec(`PRAGMA journal_mode = WAL`); err != nil { db.Close() return nil, err @@ -204,8 +203,8 @@ func (s *RuntimeStateStore) open() (*sql.DB, error) { id TEXT PRIMARY KEY, owner_id TEXT NOT NULL DEFAULT '', artifact TEXT NOT NULL, - scroll_root TEXT NOT NULL, - data_root TEXT NOT NULL DEFAULT '', + artifact_digest TEXT NOT NULL DEFAULT '', + root TEXT NOT NULL, scroll_name TEXT NOT NULL, scroll_yaml TEXT NOT NULL DEFAULT '', status TEXT NOT NULL, @@ -219,69 +218,29 @@ func (s *RuntimeStateStore) open() (*sql.DB, error) { db.Close() return nil, err } - if err := ensureColumn(db, "scrolls", "data_root", "TEXT NOT NULL DEFAULT ''"); err != nil { + if err := ensureColumn(db, "scrolls", "artifact_digest", "TEXT NOT NULL DEFAULT ''"); err != nil { db.Close() return nil, err } - if err := ensureColumn(db, "scrolls", "scroll_yaml", "TEXT NOT NULL DEFAULT ''"); err != nil { + if err := ensureColumn(db, "scrolls", "root", "TEXT NOT NULL DEFAULT ''"); err != nil { db.Close() return nil, err } - if err := ensureColumn(db, "scrolls", "last_error", "TEXT NOT NULL DEFAULT ''"); err != nil { + if err := 
ensureColumn(db, "scrolls", "scroll_yaml", "TEXT NOT NULL DEFAULT ''"); err != nil { db.Close() return nil, err } - if err := ensureColumn(db, "scrolls", "routing_json", "TEXT NOT NULL DEFAULT '[]'"); err != nil { + if err := ensureColumn(db, "scrolls", "last_error", "TEXT NOT NULL DEFAULT ''"); err != nil { db.Close() return nil, err } - if err := removeRuntimeColumn(db); err != nil { + if err := ensureColumn(db, "scrolls", "routing_json", "TEXT NOT NULL DEFAULT '[]'"); err != nil { db.Close() return nil, err } return db, nil } -func removeRuntimeColumn(db *sql.DB) error { - hasRuntime, err := tableHasColumn(db, "scrolls", "runtime") - if err != nil || !hasRuntime { - return err - } - if _, err := db.Exec(` - CREATE TABLE scrolls_new ( - id TEXT PRIMARY KEY, - owner_id TEXT NOT NULL DEFAULT '', - artifact TEXT NOT NULL, - scroll_root TEXT NOT NULL, - data_root TEXT NOT NULL DEFAULT '', - scroll_name TEXT NOT NULL, - scroll_yaml TEXT NOT NULL DEFAULT '', - status TEXT NOT NULL, - last_error TEXT NOT NULL DEFAULT '', - created_at TEXT NOT NULL, - updated_at TEXT NOT NULL, - commands_json TEXT NOT NULL DEFAULT '{}', - routing_json TEXT NOT NULL DEFAULT '[]' - ) - `); err != nil { - return err - } - if _, err := db.Exec(` - INSERT INTO scrolls_new (id, owner_id, artifact, scroll_root, data_root, scroll_name, scroll_yaml, status, created_at, updated_at, commands_json) - SELECT id, owner_id, artifact, scroll_root, data_root, scroll_name, scroll_yaml, status, created_at, updated_at, commands_json - FROM scrolls - `); err != nil { - return err - } - if _, err := db.Exec(`DROP TABLE scrolls`); err != nil { - return err - } - if _, err := db.Exec(`ALTER TABLE scrolls_new RENAME TO scrolls`); err != nil { - return err - } - return nil -} - func ensureColumn(db *sql.DB, table string, column string, definition string) error { exists, err := tableHasColumn(db, table, column) if err != nil || exists { @@ -329,7 +288,7 @@ func scanRuntimeScroll(scanner runtimeScrollScanner) 
(*domain.RuntimeScroll, err var updatedAt string var commandsJSON string var routingJSON string - if err := scanner.Scan(&scroll.ID, &scroll.OwnerID, &scroll.Artifact, &scroll.ScrollRoot, &scroll.DataRoot, &scroll.ScrollName, &scroll.ScrollYAML, &status, &lastError, &createdAt, &updatedAt, &commandsJSON, &routingJSON); err != nil { + if err := scanner.Scan(&scroll.ID, &scroll.OwnerID, &scroll.Artifact, &scroll.ArtifactDigest, &scroll.Root, &scroll.ScrollName, &scroll.ScrollYAML, &status, &lastError, &createdAt, &updatedAt, &commandsJSON, &routingJSON); err != nil { return nil, err } scroll.Status = domain.RuntimeScrollStatus(status) diff --git a/internal/core/services/runtime_state_store_test.go b/internal/core/services/runtime_state_store_test.go index a80be484..9d622634 100644 --- a/internal/core/services/runtime_state_store_test.go +++ b/internal/core/services/runtime_state_store_test.go @@ -1,13 +1,10 @@ package services_test import ( - "database/sql" - "path/filepath" "testing" "github.com/highcard-dev/daemon/internal/core/domain" "github.com/highcard-dev/daemon/internal/core/services" - _ "modernc.org/sqlite" ) func TestRuntimeStateStorePersistsCommandStatuses(t *testing.T) { @@ -16,8 +13,7 @@ func TestRuntimeStateStorePersistsCommandStatuses(t *testing.T) { scroll := &domain.RuntimeScroll{ ID: "test", Artifact: "example", - ScrollRoot: "/tmp/spec", - DataRoot: "/tmp/data", + Root: "/tmp/root", ScrollName: "test", ScrollYAML: "name: test\n", Commands: map[string]domain.LockStatus{ @@ -60,76 +56,3 @@ func TestRuntimeStateStorePersistsCommandStatuses(t *testing.T) { t.Fatalf("scroll yaml = %q, want cached yaml", got.ScrollYAML) } } - -func TestRuntimeStateStoreMigratesRuntimeColumn(t *testing.T) { - stateDir := t.TempDir() - dbPath := filepath.Join(stateDir, "state.db") - db, err := sql.Open("sqlite", dbPath) - if err != nil { - t.Fatal(err) - } - if _, err := db.Exec(` - CREATE TABLE scrolls ( - id TEXT PRIMARY KEY, - owner_id TEXT NOT NULL DEFAULT '', - 
artifact TEXT NOT NULL, - runtime TEXT NOT NULL, - scroll_root TEXT NOT NULL, - data_root TEXT NOT NULL DEFAULT '', - scroll_name TEXT NOT NULL, - scroll_yaml TEXT NOT NULL DEFAULT '', - status TEXT NOT NULL, - created_at TEXT NOT NULL, - updated_at TEXT NOT NULL, - commands_json TEXT NOT NULL DEFAULT '{}' - ) - `); err != nil { - t.Fatal(err) - } - if _, err := db.Exec(` - INSERT INTO scrolls (id, owner_id, artifact, runtime, scroll_root, data_root, scroll_name, scroll_yaml, status, created_at, updated_at, commands_json) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) - `, "legacy", "", "example", "docker", "/tmp/spec", "/tmp/data", "legacy", "name: legacy\n", "stopped", "2026-01-01T00:00:00Z", "2026-01-01T00:00:00Z", "{}"); err != nil { - t.Fatal(err) - } - if err := db.Close(); err != nil { - t.Fatal(err) - } - - store := services.NewRuntimeStateStore(stateDir) - got, err := store.GetScroll("legacy") - if err != nil { - t.Fatal(err) - } - if got.ID != "legacy" || got.Artifact != "example" || got.ScrollYAML != "name: legacy\n" { - t.Fatalf("migrated scroll = %#v", got) - } - - db, err = sql.Open("sqlite", dbPath) - if err != nil { - t.Fatal(err) - } - defer db.Close() - rows, err := db.Query(`PRAGMA table_info(scrolls)`) - if err != nil { - t.Fatal(err) - } - defer rows.Close() - for rows.Next() { - var cid int - var name string - var columnType string - var notNull int - var defaultValue sql.NullString - var pk int - if err := rows.Scan(&cid, &name, &columnType, ¬Null, &defaultValue, &pk); err != nil { - t.Fatal(err) - } - if name == "runtime" { - t.Fatal("runtime column should be removed during migration") - } - } - if err := rows.Err(); err != nil { - t.Fatal(err) - } -} diff --git a/internal/core/services/watch_service.go b/internal/core/services/watch_service.go index 4fd94820..c5edc2fb 100644 --- a/internal/core/services/watch_service.go +++ b/internal/core/services/watch_service.go @@ -29,7 +29,7 @@ type CommandDoneEvent struct { Timestamp time.Time 
`json:"timestamp"` } -// WatchService handles file watching and change notifications for UI development +// WatchService handles local dev file watching and command triggers. type WatchService struct { watcher *fsnotify.Watcher broadcastChannel *domain.BroadcastChannel @@ -46,8 +46,7 @@ type WatchService struct { changeAfterBuild bool } -// NewUiDevService creates a new instance of UiDevService -func NewUiDevService( +func NewDevService( queueManager ports.QueueManagerInterface, scrollService ports.ScrollServiceInterface, ) ports.WatchServiceInterface { return &WatchService{ @@ -132,7 +131,7 @@ func (uds *WatchService) StartWatching(basePath string, paths ...string) error { // run hot reload commands initially go uds.runHotReloadCommand() - logger.Log().Info("UI dev file watcher started") + logger.Log().Info("Dev file watcher started") return nil } @@ -171,10 +170,14 @@ func (uds *WatchService) StopWatching() error { uds.ctx = nil uds.cancel = nil - logger.Log().Info("UI dev file watcher stopped") + logger.Log().Info("Dev file watcher stopped") return nil } +func (uds *WatchService) Trigger() { + go uds.runHotReloadCommand() +} + // Subscribe returns a channel for receiving file change notifications func (uds *WatchService) Subscribe() chan *[]byte { uds.mu.RLock() @@ -333,28 +336,19 @@ func (uds *WatchService) handleFileEvent(event fsnotify.Event) { go uds.runHotReloadCommand() } -// runHotReloadCommand is a unified method for executing both build and hot reload commands func (uds *WatchService) runHotReloadCommand() { - commands := uds.hotReloadCommands - uds.mu.Lock() - - // Prevent overlapping builds - if build is active, mark that a change occurred if uds.buildActive { uds.changeAfterBuild = true uds.mu.Unlock() return } - - // Check if there are commands to execute - if len(commands) == 0 { + if len(uds.hotReloadCommands) == 0 { uds.mu.Unlock() return } - - // Mark build as active and get snapshot of commands uds.buildActive = true - + commands := 
append([]string(nil), uds.hotReloadCommands...) broadcastChannel := uds.broadcastChannel uds.mu.Unlock() @@ -374,26 +368,20 @@ func (uds *WatchService) runHotReloadCommand() { broadcastChannel.Broadcast(eventCmdData) } - for _, key := range commands { - broadcastEvent("build-started") - uds.queueManager.AddTempItemWithWait(key) - broadcastEvent("build-ended") - - // Check if changes occurred during build - uds.mu.Lock() - for uds.changeAfterBuild { - uds.changeAfterBuild = false - uds.mu.Unlock() + for { + for _, key := range commands { broadcastEvent("build-started") uds.queueManager.AddTempItemWithWait(key) broadcastEvent("build-ended") + } - uds.mu.Lock() + uds.mu.Lock() + if !uds.changeAfterBuild { + uds.buildActive = false + uds.mu.Unlock() + return } + uds.changeAfterBuild = false uds.mu.Unlock() } - - uds.mu.Lock() - uds.buildActive = false - uds.mu.Unlock() } diff --git a/internal/core/services/watch_service_test.go b/internal/core/services/watch_service_test.go index 471388df..11fd52c4 100644 --- a/internal/core/services/watch_service_test.go +++ b/internal/core/services/watch_service_test.go @@ -3,9 +3,11 @@ package services import ( "context" "os" + "sync" "testing" "time" + "github.com/highcard-dev/daemon/internal/core/domain" mock_ports "github.com/highcard-dev/daemon/test/mock" "go.uber.org/mock/gomock" ) @@ -17,8 +19,7 @@ func TestWatchService_BasicFunctionality(t *testing.T) { queueManager := mock_ports.NewMockQueueManagerInterface(ctrl) scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) - // Create the UI dev service - uiDevService := NewUiDevService(queueManager, scrollService) + uiDevService := NewDevService(queueManager, scrollService) // Check initial state if uiDevService.IsWatching() { @@ -75,8 +76,7 @@ func TestWatchService_MultipleSubscribers(t *testing.T) { queueManager := mock_ports.NewMockQueueManagerInterface(ctrl) scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) - // Create the UI dev service - uiDevService 
:= NewUiDevService(queueManager, scrollService) + uiDevService := NewDevService(queueManager, scrollService) // Start watching first err := uiDevService.StartWatching("/tmp/test", "/tmp/test/ui") @@ -109,8 +109,7 @@ func TestWatchService_ContinuousStartStop(t *testing.T) { queueManager := mock_ports.NewMockQueueManagerInterface(ctrl) scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) - // Create the UI dev service - uiDevService := NewUiDevService(queueManager, scrollService) + uiDevService := NewDevService(queueManager, scrollService) // Test multiple start/stop cycles for i := 0; i < 5; i++ { @@ -181,8 +180,7 @@ func TestWatchService_SubscribeBeforeStart(t *testing.T) { queueManager := mock_ports.NewMockQueueManagerInterface(ctrl) scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) - // Create the UI dev service - uiDevService := NewUiDevService(queueManager, scrollService) + uiDevService := NewDevService(queueManager, scrollService) // Try to subscribe before starting sub := uiDevService.Subscribe() @@ -215,8 +213,7 @@ func TestWatchService_RelativePathsJoinedWithBasePath(t *testing.T) { t.Fatalf("Failed to create config directory: %v", err) } - // Create the UI dev service - uiDevService := NewUiDevService(queueManager, scrollService) + uiDevService := NewDevService(queueManager, scrollService) // Start watching with relative paths (simulating what the handler does) err := uiDevService.StartWatching(tempDir, "src", "config") @@ -269,11 +266,71 @@ func TestWatchService_RelativePathsJoinedWithBasePath(t *testing.T) { case event := <-sub: if event == nil { t.Error("Received nil event") + } else { + t.Logf("Received file change event: %s", string(*event)) } - // Successfully received a file change event, which proves the watcher - // is correctly watching the joined path (basePath + relative path) - t.Logf("Received file change event: %s", string(*event)) case <-ctx.Done(): t.Error("Timeout waiting for file change event - relative path was 
likely not joined with base path") } } + +func TestWatchService_RunsHotReloadCommandOnStartAndFileChange(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + tempDir := t.TempDir() + watchDir := tempDir + "/dist" + if err := os.MkdirAll(watchDir, 0755); err != nil { + t.Fatalf("Failed to create watch directory: %v", err) + } + + var mu sync.Mutex + runCount := 0 + ran := make(chan struct{}, 10) + queueManager := mock_ports.NewMockQueueManagerInterface(ctrl) + queueManager.EXPECT().AddTempItemWithWait("build").DoAndReturn(func(string) error { + mu.Lock() + runCount++ + mu.Unlock() + ran <- struct{}{} + return nil + }).AnyTimes() + + scrollService := mock_ports.NewMockScrollServiceInterface(ctrl) + scrollService.EXPECT().GetCommand("build").Return(&domain.CommandInstructionSet{}, nil).AnyTimes() + + uiDevService := NewDevService(queueManager, scrollService) + if err := uiDevService.SetHotReloadCommands([]string{"build"}); err != nil { + t.Fatalf("SetHotReloadCommands failed: %v", err) + } + if err := uiDevService.StartWatching(tempDir, "dist"); err != nil { + t.Fatalf("StartWatching failed: %v", err) + } + defer uiDevService.StopWatching() + + waitForRunCount(t, ran, &mu, &runCount, 1) + + if err := os.WriteFile(watchDir+"/app.wasm", []byte("changed"), 0644); err != nil { + t.Fatalf("Failed to write watched file: %v", err) + } + + waitForRunCount(t, ran, &mu, &runCount, 2) +} + +func waitForRunCount(t *testing.T, ran <-chan struct{}, mu *sync.Mutex, runCount *int, want int) { + t.Helper() + deadline := time.After(2 * time.Second) + for { + mu.Lock() + got := *runCount + mu.Unlock() + if got >= want { + return + } + select { + case <-ran: + case <-deadline: + t.Fatalf("Timed out waiting for %d hot reload runs, got %d", want, got) + } + } +} diff --git a/internal/devapi/generated.go b/internal/devapi/generated.go new file mode 100644 index 00000000..bfa4d247 --- /dev/null +++ b/internal/devapi/generated.go @@ -0,0 +1,1117 @@ +// Package devapi 
provides primitives to interact with the openapi HTTP API. +// +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.5.1 DO NOT EDIT. +package devapi + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "path" + "strings" + + "github.com/getkin/kin-openapi/openapi3" + "github.com/gofiber/fiber/v2" + "github.com/oapi-codegen/runtime" +) + +// FilePath defines model for FilePath. +type FilePath = string + +// GetFileParams defines parameters for GetFile. +type GetFileParams struct { + // Path Runtime-root-relative file path, for example data/private/package.json. + Path FilePath `form:"path" json:"path"` +} + +// HeadFileParams defines parameters for HeadFile. +type HeadFileParams struct { + // Path Runtime-root-relative file path, for example data/private/package.json. + Path FilePath `form:"path" json:"path"` +} + +// OptionsFileParams defines parameters for OptionsFile. +type OptionsFileParams struct { + // Path Runtime-root-relative file path, for example data/private/package.json. + Path FilePath `form:"path" json:"path"` +} + +// PutFileTextBody defines parameters for PutFile. +type PutFileTextBody = string + +// PutFileParams defines parameters for PutFile. +type PutFileParams struct { + // Path Runtime-root-relative file path, for example data/private/package.json. + Path FilePath `form:"path" json:"path"` +} + +// PutFileTextRequestBody defines body for PutFile for text/plain ContentType. +type PutFileTextRequestBody = PutFileTextBody + +// RequestEditorFn is the function signature for the RequestEditor callback function +type RequestEditorFn func(ctx context.Context, req *http.Request) error + +// Doer performs HTTP requests. +// +// The standard http.Client implements this interface. +type HttpRequestDoer interface { + Do(req *http.Request) (*http.Response, error) +} + +// Client which conforms to the OpenAPI3 specification for this service. 
+type Client struct { + // The endpoint of the server conforming to this interface, with scheme, + // https://api.deepmap.com for example. This can contain a path relative + // to the server, such as https://api.deepmap.com/dev-test, and all the + // paths in the swagger spec will be appended to the server. + Server string + + // Doer for performing requests, typically a *http.Client with any + // customized settings, such as certificate chains. + Client HttpRequestDoer + + // A list of callbacks for modifying requests which are generated before sending over + // the network. + RequestEditors []RequestEditorFn +} + +// ClientOption allows setting custom parameters during construction +type ClientOption func(*Client) error + +// Creates a new Client, with reasonable defaults +func NewClient(server string, opts ...ClientOption) (*Client, error) { + // create a client with sane default values + client := Client{ + Server: server, + } + // mutate client and add all optional params + for _, o := range opts { + if err := o(&client); err != nil { + return nil, err + } + } + // ensure the server URL always has a trailing slash + if !strings.HasSuffix(client.Server, "/") { + client.Server += "/" + } + // create httpClient, if not already present + if client.Client == nil { + client.Client = &http.Client{} + } + return &client, nil +} + +// WithHTTPClient allows overriding the default Doer, which is +// automatically created using http.Client. This is useful for tests. +func WithHTTPClient(doer HttpRequestDoer) ClientOption { + return func(c *Client) error { + c.Client = doer + return nil + } +} + +// WithRequestEditorFn allows setting up a callback function, which will be +// called right before sending the request. This can be used to mutate the request. +func WithRequestEditorFn(fn RequestEditorFn) ClientOption { + return func(c *Client) error { + c.RequestEditors = append(c.RequestEditors, fn) + return nil + } +} + +// The interface specification for the client above. 
+type ClientInterface interface { + // GetFile request + GetFile(ctx context.Context, params *GetFileParams, reqEditors ...RequestEditorFn) (*http.Response, error) + + // HeadFile request + HeadFile(ctx context.Context, params *HeadFileParams, reqEditors ...RequestEditorFn) (*http.Response, error) + + // OptionsFile request + OptionsFile(ctx context.Context, params *OptionsFileParams, reqEditors ...RequestEditorFn) (*http.Response, error) + + // PutFileWithBody request with any body + PutFileWithBody(ctx context.Context, params *PutFileParams, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + + PutFileWithTextBody(ctx context.Context, params *PutFileParams, body PutFileTextRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) + + // GetHealth request + GetHealth(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) + + // WatchNotifications request + WatchNotifications(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) +} + +func (c *Client) GetFile(ctx context.Context, params *GetFileParams, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGetFileRequest(c.Server, params) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) HeadFile(ctx context.Context, params *HeadFileParams, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewHeadFileRequest(c.Server, params) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) OptionsFile(ctx context.Context, params *OptionsFileParams, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewOptionsFileRequest(c.Server, params) + if err != nil { + return nil, err + } + 
req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) PutFileWithBody(ctx context.Context, params *PutFileParams, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewPutFileRequestWithBody(c.Server, params, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) PutFileWithTextBody(ctx context.Context, params *PutFileParams, body PutFileTextRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewPutFileRequestWithTextBody(c.Server, params, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) GetHealth(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGetHealthRequest(c.Server) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) WatchNotifications(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewWatchNotificationsRequest(c.Server) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +// NewGetFileRequest generates requests for GetFile +func NewGetFileRequest(server string, params *GetFileParams) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/files") + if operationPath[0] == '/' 
{ + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + if params != nil { + queryValues := queryURL.Query() + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "path", runtime.ParamLocationQuery, params.Path); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + queryURL.RawQuery = queryValues.Encode() + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewHeadFileRequest generates requests for HeadFile +func NewHeadFileRequest(server string, params *HeadFileParams) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/files") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + if params != nil { + queryValues := queryURL.Query() + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "path", runtime.ParamLocationQuery, params.Path); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + queryURL.RawQuery = queryValues.Encode() + } + + req, err := http.NewRequest("HEAD", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewOptionsFileRequest generates requests for OptionsFile +func NewOptionsFileRequest(server string, params *OptionsFileParams) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/files") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + if params != nil { + queryValues := queryURL.Query() + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "path", runtime.ParamLocationQuery, params.Path); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + queryURL.RawQuery = queryValues.Encode() + } + + req, err := http.NewRequest("OPTIONS", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewPutFileRequestWithTextBody calls the generic PutFile builder with text/plain body +func NewPutFileRequestWithTextBody(server string, params *PutFileParams, body PutFileTextRequestBody) (*http.Request, error) { + var bodyReader io.Reader + bodyReader = strings.NewReader(string(body)) + return NewPutFileRequestWithBody(server, params, "text/plain", bodyReader) +} + +// NewPutFileRequestWithBody generates requests for PutFile with any type of body +func NewPutFileRequestWithBody(server string, params *PutFileParams, contentType string, body io.Reader) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/files") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + if params != nil { + queryValues := queryURL.Query() + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "path", runtime.ParamLocationQuery, params.Path); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + queryURL.RawQuery = queryValues.Encode() + } + + req, err := http.NewRequest("PUT", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + +// NewGetHealthRequest generates requests for GetHealth +func NewGetHealthRequest(server string) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/health") + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewWatchNotificationsRequest generates requests for WatchNotifications +func NewWatchNotificationsRequest(server string) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/ws/v1/watch/notify") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +func (c *Client) applyEditors(ctx context.Context, req *http.Request, additionalEditors []RequestEditorFn) error { + for _, r := range c.RequestEditors { + if err := r(ctx, req); err != nil { + return err + } + } + for _, r := range additionalEditors { + if err := r(ctx, req); err != nil { + return err + } + } + return nil +} + +// ClientWithResponses builds on ClientInterface to offer response payloads +type ClientWithResponses struct { + ClientInterface +} + +// NewClientWithResponses creates a new ClientWithResponses, which wraps +// Client with return type handling +func NewClientWithResponses(server string, opts ...ClientOption) (*ClientWithResponses, error) { + client, err := NewClient(server, opts...) + if err != nil { + return nil, err + } + return &ClientWithResponses{client}, nil +} + +// WithBaseURL overrides the baseURL. +func WithBaseURL(baseURL string) ClientOption { + return func(c *Client) error { + newBaseURL, err := url.Parse(baseURL) + if err != nil { + return err + } + c.Server = newBaseURL.String() + return nil + } +} + +// ClientWithResponsesInterface is the interface specification for the client with responses above. 
+type ClientWithResponsesInterface interface { + // GetFileWithResponse request + GetFileWithResponse(ctx context.Context, params *GetFileParams, reqEditors ...RequestEditorFn) (*GetFileResponse, error) + + // HeadFileWithResponse request + HeadFileWithResponse(ctx context.Context, params *HeadFileParams, reqEditors ...RequestEditorFn) (*HeadFileResponse, error) + + // OptionsFileWithResponse request + OptionsFileWithResponse(ctx context.Context, params *OptionsFileParams, reqEditors ...RequestEditorFn) (*OptionsFileResponse, error) + + // PutFileWithBodyWithResponse request with any body + PutFileWithBodyWithResponse(ctx context.Context, params *PutFileParams, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*PutFileResponse, error) + + PutFileWithTextBodyWithResponse(ctx context.Context, params *PutFileParams, body PutFileTextRequestBody, reqEditors ...RequestEditorFn) (*PutFileResponse, error) + + // GetHealthWithResponse request + GetHealthWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*GetHealthResponse, error) + + // WatchNotificationsWithResponse request + WatchNotificationsWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*WatchNotificationsResponse, error) +} + +type GetFileResponse struct { + Body []byte + HTTPResponse *http.Response +} + +// Status returns HTTPResponse.Status +func (r GetFileResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetFileResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type HeadFileResponse struct { + Body []byte + HTTPResponse *http.Response +} + +// Status returns HTTPResponse.Status +func (r HeadFileResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r 
HeadFileResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type OptionsFileResponse struct { + Body []byte + HTTPResponse *http.Response +} + +// Status returns HTTPResponse.Status +func (r OptionsFileResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r OptionsFileResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type PutFileResponse struct { + Body []byte + HTTPResponse *http.Response +} + +// Status returns HTTPResponse.Status +func (r PutFileResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r PutFileResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type GetHealthResponse struct { + Body []byte + HTTPResponse *http.Response +} + +// Status returns HTTPResponse.Status +func (r GetHealthResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetHealthResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type WatchNotificationsResponse struct { + Body []byte + HTTPResponse *http.Response +} + +// Status returns HTTPResponse.Status +func (r WatchNotificationsResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r WatchNotificationsResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +// GetFileWithResponse request returning *GetFileResponse +func (c 
*ClientWithResponses) GetFileWithResponse(ctx context.Context, params *GetFileParams, reqEditors ...RequestEditorFn) (*GetFileResponse, error) { + rsp, err := c.GetFile(ctx, params, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetFileResponse(rsp) +} + +// HeadFileWithResponse request returning *HeadFileResponse +func (c *ClientWithResponses) HeadFileWithResponse(ctx context.Context, params *HeadFileParams, reqEditors ...RequestEditorFn) (*HeadFileResponse, error) { + rsp, err := c.HeadFile(ctx, params, reqEditors...) + if err != nil { + return nil, err + } + return ParseHeadFileResponse(rsp) +} + +// OptionsFileWithResponse request returning *OptionsFileResponse +func (c *ClientWithResponses) OptionsFileWithResponse(ctx context.Context, params *OptionsFileParams, reqEditors ...RequestEditorFn) (*OptionsFileResponse, error) { + rsp, err := c.OptionsFile(ctx, params, reqEditors...) + if err != nil { + return nil, err + } + return ParseOptionsFileResponse(rsp) +} + +// PutFileWithBodyWithResponse request with arbitrary body returning *PutFileResponse +func (c *ClientWithResponses) PutFileWithBodyWithResponse(ctx context.Context, params *PutFileParams, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*PutFileResponse, error) { + rsp, err := c.PutFileWithBody(ctx, params, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParsePutFileResponse(rsp) +} + +func (c *ClientWithResponses) PutFileWithTextBodyWithResponse(ctx context.Context, params *PutFileParams, body PutFileTextRequestBody, reqEditors ...RequestEditorFn) (*PutFileResponse, error) { + rsp, err := c.PutFileWithTextBody(ctx, params, body, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParsePutFileResponse(rsp) +} + +// GetHealthWithResponse request returning *GetHealthResponse +func (c *ClientWithResponses) GetHealthWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*GetHealthResponse, error) { + rsp, err := c.GetHealth(ctx, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetHealthResponse(rsp) +} + +// WatchNotificationsWithResponse request returning *WatchNotificationsResponse +func (c *ClientWithResponses) WatchNotificationsWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*WatchNotificationsResponse, error) { + rsp, err := c.WatchNotifications(ctx, reqEditors...) + if err != nil { + return nil, err + } + return ParseWatchNotificationsResponse(rsp) +} + +// ParseGetFileResponse parses an HTTP response from a GetFileWithResponse call +func ParseGetFileResponse(rsp *http.Response) (*GetFileResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &GetFileResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + return response, nil +} + +// ParseHeadFileResponse parses an HTTP response from a HeadFileWithResponse call +func ParseHeadFileResponse(rsp *http.Response) (*HeadFileResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &HeadFileResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + return response, nil +} + +// ParseOptionsFileResponse parses an HTTP response from a OptionsFileWithResponse call +func ParseOptionsFileResponse(rsp *http.Response) (*OptionsFileResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &OptionsFileResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + return response, nil +} + +// ParsePutFileResponse 
parses an HTTP response from a PutFileWithResponse call +func ParsePutFileResponse(rsp *http.Response) (*PutFileResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &PutFileResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + return response, nil +} + +// ParseGetHealthResponse parses an HTTP response from a GetHealthWithResponse call +func ParseGetHealthResponse(rsp *http.Response) (*GetHealthResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &GetHealthResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + return response, nil +} + +// ParseWatchNotificationsResponse parses an HTTP response from a WatchNotificationsWithResponse call +func ParseWatchNotificationsResponse(rsp *http.Response) (*WatchNotificationsResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &WatchNotificationsResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + return response, nil +} + +// ServerInterface represents all server handlers. 
+type ServerInterface interface { + // Read a file from the runtime root + // (GET /api/v1/files) + GetFile(c *fiber.Ctx, params GetFileParams) error + // Check if a runtime file exists + // (HEAD /api/v1/files) + HeadFile(c *fiber.Ctx, params HeadFileParams) error + // Return CORS/WebDAV file access options + // (OPTIONS /api/v1/files) + OptionsFile(c *fiber.Ctx, params OptionsFileParams) error + // Write a file into the runtime root + // (PUT /api/v1/files) + PutFile(c *fiber.Ctx, params PutFileParams) error + // Check dev server health + // (GET /health) + GetHealth(c *fiber.Ctx) error + // Subscribe to file change and build notifications + // (GET /ws/v1/watch/notify) + WatchNotifications(c *fiber.Ctx) error +} + +// ServerInterfaceWrapper converts contexts to parameters. +type ServerInterfaceWrapper struct { + Handler ServerInterface +} + +type MiddlewareFunc fiber.Handler + +// GetFile operation middleware +func (siw *ServerInterfaceWrapper) GetFile(c *fiber.Ctx) error { + + var err error + + // Parameter object where we will unmarshal all parameters from the context + var params GetFileParams + + var query url.Values + query, err = url.ParseQuery(string(c.Request().URI().QueryString())) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for query string: %w", err).Error()) + } + + // ------------- Required query parameter "path" ------------- + + if paramValue := c.Query("path"); paramValue != "" { + + } else { + err = fmt.Errorf("Query argument path is required, but not found") + c.Status(fiber.StatusBadRequest).JSON(err) + return err + } + + err = runtime.BindQueryParameter("form", true, true, "path", query, ¶ms.Path) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter path: %w", err).Error()) + } + + return siw.Handler.GetFile(c, params) +} + +// HeadFile operation middleware +func (siw *ServerInterfaceWrapper) HeadFile(c *fiber.Ctx) error { + + var err error 
+ + // Parameter object where we will unmarshal all parameters from the context + var params HeadFileParams + + var query url.Values + query, err = url.ParseQuery(string(c.Request().URI().QueryString())) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for query string: %w", err).Error()) + } + + // ------------- Required query parameter "path" ------------- + + if paramValue := c.Query("path"); paramValue != "" { + + } else { + err = fmt.Errorf("Query argument path is required, but not found") + c.Status(fiber.StatusBadRequest).JSON(err) + return err + } + + err = runtime.BindQueryParameter("form", true, true, "path", query, ¶ms.Path) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter path: %w", err).Error()) + } + + return siw.Handler.HeadFile(c, params) +} + +// OptionsFile operation middleware +func (siw *ServerInterfaceWrapper) OptionsFile(c *fiber.Ctx) error { + + var err error + + // Parameter object where we will unmarshal all parameters from the context + var params OptionsFileParams + + var query url.Values + query, err = url.ParseQuery(string(c.Request().URI().QueryString())) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for query string: %w", err).Error()) + } + + // ------------- Required query parameter "path" ------------- + + if paramValue := c.Query("path"); paramValue != "" { + + } else { + err = fmt.Errorf("Query argument path is required, but not found") + c.Status(fiber.StatusBadRequest).JSON(err) + return err + } + + err = runtime.BindQueryParameter("form", true, true, "path", query, ¶ms.Path) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter path: %w", err).Error()) + } + + return siw.Handler.OptionsFile(c, params) +} + +// PutFile operation middleware +func (siw *ServerInterfaceWrapper) PutFile(c *fiber.Ctx) error { + + var err error + + // 
Parameter object where we will unmarshal all parameters from the context + var params PutFileParams + + var query url.Values + query, err = url.ParseQuery(string(c.Request().URI().QueryString())) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for query string: %w", err).Error()) + } + + // ------------- Required query parameter "path" ------------- + + if paramValue := c.Query("path"); paramValue != "" { + + } else { + err = fmt.Errorf("Query argument path is required, but not found") + c.Status(fiber.StatusBadRequest).JSON(err) + return err + } + + err = runtime.BindQueryParameter("form", true, true, "path", query, ¶ms.Path) + if err != nil { + return fiber.NewError(fiber.StatusBadRequest, fmt.Errorf("Invalid format for parameter path: %w", err).Error()) + } + + return siw.Handler.PutFile(c, params) +} + +// GetHealth operation middleware +func (siw *ServerInterfaceWrapper) GetHealth(c *fiber.Ctx) error { + + return siw.Handler.GetHealth(c) +} + +// WatchNotifications operation middleware +func (siw *ServerInterfaceWrapper) WatchNotifications(c *fiber.Ctx) error { + + return siw.Handler.WatchNotifications(c) +} + +// FiberServerOptions provides options for the Fiber server. +type FiberServerOptions struct { + BaseURL string + Middlewares []MiddlewareFunc +} + +// RegisterHandlers creates http.Handler with routing matching OpenAPI spec. 
+func RegisterHandlers(router fiber.Router, si ServerInterface) { + RegisterHandlersWithOptions(router, si, FiberServerOptions{}) +} + +// RegisterHandlersWithOptions creates http.Handler with additional options +func RegisterHandlersWithOptions(router fiber.Router, si ServerInterface, options FiberServerOptions) { + wrapper := ServerInterfaceWrapper{ + Handler: si, + } + + for _, m := range options.Middlewares { + router.Use(fiber.Handler(m)) + } + + router.Get(options.BaseURL+"/api/v1/files", wrapper.GetFile) + + router.Head(options.BaseURL+"/api/v1/files", wrapper.HeadFile) + + router.Options(options.BaseURL+"/api/v1/files", wrapper.OptionsFile) + + router.Put(options.BaseURL+"/api/v1/files", wrapper.PutFile) + + router.Get(options.BaseURL+"/health", wrapper.GetHealth) + + router.Get(options.BaseURL+"/ws/v1/watch/notify", wrapper.WatchNotifications) + +} + +// Base64 encoded, gzipped, json marshaled Swagger object +var swaggerSpec = []string{ + + "H4sIAAAAAAAC/8xWzY7bNhB+FWLao9ay2z3plmSRZi/JwgbqQ7GHsTiyWEsklxzJaxh694KU/LMrbRIU", + "KZqTbXJIzvczMz5CbmprNGn2kB3BosOamFz89VFV9IBchu+SfO6UZWU0ZLBsNKuabpwxfOOoQlYtiUJV", + "JCxymYjCOEHPWNuKhETG1DrVIlNqMd/hlmZ/e6NnkIAK1z015A6QgMaaIINwBSTg6KlRjiRk7BpKwOcl", + "1RiS4YMNcZ6d0lvoui4Ee2u0p5j4e5RLemrI8zj1e91ipaRwQ0CXwGfDH02j5Tg4MCC0YVHE/fiS0oV5", + "kxEhqRV743bkxLuH+8hDpAXznLxPRElYBYJQS7FHzstwvSpUjuEiHyhhxVWAd+caJcUdtWJFriUHCbTk", + "fP/efLaYzUPyxpJGqyCD3+NSEumLNKRoVdou0pBAXNhSJMRYcvG5ewkZ/EEcYMaDF/X/OsKvjgrI4Jf0", + "4pH0EpKe3dE9vqL/t/k8fORGM+n4IlpbDRBTkzPxjWdHWIe9i6yFcTUyZLBRGqMhXgudANMzp7bC4Jvj", + "1y0xIeWQkg/E3fZZTqE8o0mvnBSP3H77yNlNIQff1HWAksGSUArszVA4UwsuSbjBNaGOAlzcBuqhF+yx", + "S6AklGPJPhHK/0azCc7oWfn/hbEPJeU7oQqBZ56Kq4Sm6DIxcz9m7Eu/8cNJux2TNjwlHHHjNI19EJbF", + "hy/LVbqmzd27P68bhDghmEJnm4nyfWh+SPlGxd4befgZKvdl5+++h/Vo1b1TzKT/lVdfiLR2iulUrUqz", + "+Z5q7RJI+/b+tV77qY/4Zst8i65hqkIGZjfB86jvhfnh4/wQyg/j5zBZafISWZ6SPGEcFnqQex+mSpxe", + "aZxehzcBr0PQ5+sJ9xr5Yr4Yq7mmzcrkO2LR2K1D2ZeH5VE1rZpNOLchwaZXKy9RbynO102jKvlyvl5B", + 
"ivkHROHCCLuvm8ZVkEEa62KIPZ7+mQw0dMl5pRf/aqG/tnvs/gkAAP//kl/AgFwJAAA=", +} + +// GetSwagger returns the content of the embedded swagger specification file +// or error if failed to decode +func decodeSpec() ([]byte, error) { + zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, "")) + if err != nil { + return nil, fmt.Errorf("error base64 decoding spec: %w", err) + } + zr, err := gzip.NewReader(bytes.NewReader(zipped)) + if err != nil { + return nil, fmt.Errorf("error decompressing spec: %w", err) + } + var buf bytes.Buffer + _, err = buf.ReadFrom(zr) + if err != nil { + return nil, fmt.Errorf("error decompressing spec: %w", err) + } + + return buf.Bytes(), nil +} + +var rawSpec = decodeSpecCached() + +// a naive cached of a decoded swagger spec +func decodeSpecCached() func() ([]byte, error) { + data, err := decodeSpec() + return func() ([]byte, error) { + return data, err + } +} + +// Constructs a synthetic filesystem for resolving external references when loading openapi specifications. +func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) { + res := make(map[string]func() ([]byte, error)) + if len(pathToFile) > 0 { + res[pathToFile] = rawSpec + } + + return res +} + +// GetSwagger returns the Swagger specification corresponding to the generated code +// in this file. The external references of Swagger specification are resolved. +// The logic of resolving external references is tightly connected to "import-mapping" feature. +// Externally referenced files must be embedded in the corresponding golang packages. +// Urls can be supported but this task was out of the scope. 
+func GetSwagger() (swagger *openapi3.T, err error) { + resolvePath := PathToRawSpec("") + + loader := openapi3.NewLoader() + loader.IsExternalRefsAllowed = true + loader.ReadFromURIFunc = func(loader *openapi3.Loader, url *url.URL) ([]byte, error) { + pathToFile := url.String() + pathToFile = path.Clean(pathToFile) + getSpec, ok := resolvePath[pathToFile] + if !ok { + err1 := fmt.Errorf("path not found: %s", pathToFile) + return nil, err1 + } + return getSpec() + } + var specData []byte + specData, err = rawSpec() + if err != nil { + return + } + swagger, err = loader.LoadFromData(specData) + if err != nil { + return + } + return +} diff --git a/internal/routing/publish.go b/internal/routing/publish.go new file mode 100644 index 00000000..946188a2 --- /dev/null +++ b/internal/routing/publish.go @@ -0,0 +1,92 @@ +package routing + +import ( + "fmt" + "strconv" + "strings" + + "github.com/highcard-dev/daemon/internal/api" +) + +func AssignmentsFromPublishes(publishes []string, targets []api.RuntimeRoutingTarget, id string) ([]api.RuntimeRouteAssignment, error) { + assignments := make([]api.RuntimeRouteAssignment, 0, len(publishes)) + for _, publish := range publishes { + externalIP := "127.0.0.1" + parts := strings.Split(publish, ":") + if len(parts) == 3 { + externalIP = parts[0] + parts = parts[1:] + } + if len(parts) != 2 || externalIP == "" || parts[0] == "" || parts[1] == "" { + return nil, fmt.Errorf("invalid publish %q, want [external-ip:]public-port:target[/protocol]", publish) + } + publicPort, err := strconv.Atoi(parts[0]) + if err != nil || publicPort < 1 || publicPort > 65535 { + return nil, fmt.Errorf("invalid public port in publish %q", publish) + } + targetName, protocolOverride, hasProtocolOverride := strings.Cut(parts[1], "/") + if targetName == "" || (hasProtocolOverride && protocolOverride == "") { + return nil, fmt.Errorf("invalid target in publish %q", publish) + } + + target, err := targetForPublish(targetName, targets, id) + if err != nil { + 
return nil, err + } + protocol := target.Protocol + if hasProtocolOverride { + protocol = protocolOverride + } + if protocol == "" { + protocol = "tcp" + } + + host := "localhost" + name := target.Name + portName := target.PortName + publicPortValue := publicPort + assignment := api.RuntimeRouteAssignment{ + Name: &name, + PortName: &portName, + ExternalIp: &externalIP, + PublicPort: &publicPortValue, + Host: &host, + Protocol: &protocol, + } + if protocol == "http" || protocol == "https" { + url := fmt.Sprintf("%s://%s:%d", protocol, host, publicPort) + assignment.Url = &url + } + assignments = append(assignments, assignment) + } + return assignments, nil +} + +func targetForPublish(targetName string, targets []api.RuntimeRoutingTarget, id string) (api.RuntimeRoutingTarget, error) { + matches := make([]api.RuntimeRoutingTarget, 0, 1) + for _, target := range targets { + if target.PortName == targetName { + matches = append(matches, target) + } + } + if len(matches) == 0 { + if targetPort, err := strconv.Atoi(targetName); err == nil { + for _, target := range targets { + if target.Port == targetPort { + matches = append(matches, target) + } + } + } + } + if len(matches) == 1 { + return matches[0], nil + } + command := "druid routing targets " + if id != "" { + command = "druid routing targets " + id + } + if len(matches) == 0 { + return api.RuntimeRoutingTarget{}, fmt.Errorf("routing target %q not found; run %q", targetName, command) + } + return api.RuntimeRoutingTarget{}, fmt.Errorf("routing target %q is ambiguous; run %q", targetName, command) +} diff --git a/internal/routing/publish_test.go b/internal/routing/publish_test.go new file mode 100644 index 00000000..88d7f3e7 --- /dev/null +++ b/internal/routing/publish_test.go @@ -0,0 +1,74 @@ +package routing + +import ( + "strings" + "testing" + + "github.com/highcard-dev/daemon/internal/api" +) + +func TestAssignmentsFromPublishes(t *testing.T) { + targets := []api.RuntimeRoutingTarget{ + {Name: "web-http", 
PortName: "http", Port: 80, Protocol: "http"}, + {Name: "db-postgres", PortName: "postgres", Port: 5432, Protocol: "tcp"}, + } + + assignments, err := AssignmentsFromPublishes([]string{ + "8080:http", + "0.0.0.0:15432:5432", + "8443:http/https", + }, targets, "scroll-a") + if err != nil { + t.Fatal(err) + } + if len(assignments) != 3 { + t.Fatalf("assignments = %d, want 3", len(assignments)) + } + assertAssignment(t, assignments[0], "web-http", "http", "127.0.0.1", 8080, "localhost", "http", "http://localhost:8080") + assertAssignment(t, assignments[1], "db-postgres", "postgres", "0.0.0.0", 15432, "localhost", "tcp", "") + assertAssignment(t, assignments[2], "web-http", "http", "127.0.0.1", 8443, "localhost", "https", "https://localhost:8443") +} + +func TestAssignmentsFromPublishesErrors(t *testing.T) { + targets := []api.RuntimeRoutingTarget{ + {Name: "web-a", PortName: "http-a", Port: 80, Protocol: "http"}, + {Name: "web-b", PortName: "http-b", Port: 80, Protocol: "http"}, + } + for _, tc := range []struct { + name string + spec string + wantErr string + }{ + {name: "invalid syntax", spec: "8080", wantErr: "invalid publish"}, + {name: "missing target", spec: "8080:http", wantErr: "druid routing targets scroll-a"}, + {name: "ambiguous port", spec: "8080:80", wantErr: "ambiguous"}, + } { + t.Run(tc.name, func(t *testing.T) { + _, err := AssignmentsFromPublishes([]string{tc.spec}, targets, "scroll-a") + if err == nil || !strings.Contains(err.Error(), tc.wantErr) { + t.Fatalf("error = %v, want contains %q", err, tc.wantErr) + } + }) + } +} + +func assertAssignment(t *testing.T, assignment api.RuntimeRouteAssignment, name string, portName string, externalIP string, publicPort int, host string, protocol string, url string) { + t.Helper() + if value(assignment.Name) != name || value(assignment.PortName) != portName || value(assignment.ExternalIp) != externalIP || intValue(assignment.PublicPort) != publicPort || value(assignment.Host) != host || 
value(assignment.Protocol) != protocol || value(assignment.Url) != url { + t.Fatalf("assignment = %#v", assignment) + } +} + +func value(in *string) string { + if in == nil { + return "" + } + return *in +} + +func intValue(in *int) int { + if in == nil { + return 0 + } + return *in +} diff --git a/internal/runtime/backend.go b/internal/runtime/backend.go index b3de79f8..e68bd05e 100644 --- a/internal/runtime/backend.go +++ b/internal/runtime/backend.go @@ -4,12 +4,20 @@ import ( "fmt" "github.com/highcard-dev/daemon/internal/core/ports" + coreservices "github.com/highcard-dev/daemon/internal/core/services" "github.com/highcard-dev/daemon/internal/runtime/docker" runtimekubernetes "github.com/highcard-dev/daemon/internal/runtime/kubernetes" + "github.com/highcard-dev/daemon/internal/utils" ) +type Runtime struct { + Backend ports.RuntimeBackendInterface + Store coreservices.RuntimeScrollStore +} + type Options struct { Kubernetes runtimekubernetes.Config + Docker docker.Config } type Option func(*Options) @@ -20,17 +28,78 @@ func WithKubernetesConfig(config runtimekubernetes.Config) Option { } } -func NewBackend(name string, consoleManager ports.ConsoleManagerInterface, opts ...Option) (ports.RuntimeBackendInterface, error) { +func WithDockerConfig(config docker.Config) Option { + return func(options *Options) { + options.Docker = config + } +} + +var newDockerBackend = func(config docker.Config, consoleManager ports.ConsoleManagerInterface) (ports.RuntimeBackendInterface, error) { + return docker.NewWithConfig(config, consoleManager) +} + +var newKubernetesBackend = func(config runtimekubernetes.Config, consoleManager ports.ConsoleManagerInterface) (ports.RuntimeBackendInterface, error) { + return runtimekubernetes.New(config, consoleManager) +} + +var newKubernetesStateStore = func(config runtimekubernetes.Config) (coreservices.RuntimeScrollStore, error) { + return runtimekubernetes.NewConfigMapStateStore(config) +} + +func NewRuntime(name string, consoleManager 
ports.ConsoleManagerInterface, stateDir string, opts ...Option) (*Runtime, error) { options := Options{} for _, opt := range opts { opt(&options) } switch name { case "", "docker": - return docker.New(consoleManager) + backend, err := newDockerBackend(options.Docker, consoleManager) + if err != nil { + return nil, err + } + store, err := newSQLiteStore(stateDir) + if err != nil { + return nil, err + } + return &Runtime{ + Backend: backend, + Store: dockerRuntimeStore{RuntimeScrollStore: store, config: options.Docker.WithDefaults()}, + }, nil case "kubernetes": - return runtimekubernetes.New(options.Kubernetes, consoleManager) + backend, err := newKubernetesBackend(options.Kubernetes, consoleManager) + if err != nil { + return nil, err + } + store, err := newKubernetesStateStore(options.Kubernetes) + if err != nil { + return nil, err + } + return &Runtime{Backend: backend, Store: store}, nil default: return nil, fmt.Errorf("unknown runtime backend %q", name) } } + +func newSQLiteStore(stateDir string) (coreservices.RuntimeScrollStore, error) { + if stateDir == "" { + defaultStateDir, err := utils.DefaultRuntimeStateDir() + if err != nil { + return nil, err + } + stateDir = defaultStateDir + } + return coreservices.NewRuntimeStateStore(stateDir), nil +} + +type dockerRuntimeStore struct { + coreservices.RuntimeScrollStore + config docker.Config +} + +func (s dockerRuntimeStore) Root(id string) string { + root, err := s.config.RuntimeRootRef(id) + if err != nil { + return s.RuntimeScrollStore.Root(id) + } + return root +} diff --git a/internal/runtime/backend_factory_test.go b/internal/runtime/backend_factory_test.go new file mode 100644 index 00000000..ce0de548 --- /dev/null +++ b/internal/runtime/backend_factory_test.go @@ -0,0 +1,170 @@ +package runtime + +import ( + "context" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/ports" + coreservices 
"github.com/highcard-dev/daemon/internal/core/services" + "github.com/highcard-dev/daemon/internal/runtime/docker" + runtimekubernetes "github.com/highcard-dev/daemon/internal/runtime/kubernetes" +) + +func TestNewRuntimeDockerOwnsStoreSelection(t *testing.T) { + previousDocker := newDockerBackend + newDockerBackend = func(config docker.Config, consoleManager ports.ConsoleManagerInterface) (ports.RuntimeBackendInterface, error) { + if config.VolumePrefix != "lab" { + t.Fatalf("volume prefix = %s, want lab", config.VolumePrefix) + } + return fakeBackend{name: "docker"}, nil + } + t.Cleanup(func() { newDockerBackend = previousDocker }) + + runtime, err := NewRuntime("docker", nil, t.TempDir(), WithDockerConfig(docker.Config{VolumePrefix: "lab"})) + if err != nil { + t.Fatal(err) + } + if runtime.Backend.Name() != "docker" { + t.Fatalf("backend = %s, want docker", runtime.Backend.Name()) + } + if got := runtime.Store.Root("scroll-a"); got != "docker-volume://lab-scroll-a-data" { + t.Fatalf("Root = %s", got) + } +} + +func TestNewRuntimeKubernetesOwnsStoreSelection(t *testing.T) { + stateDir := t.TempDir() + previousBackend := newKubernetesBackend + previousStore := newKubernetesStateStore + newKubernetesBackend = func(config runtimekubernetes.Config, consoleManager ports.ConsoleManagerInterface) (ports.RuntimeBackendInterface, error) { + if config.Namespace != "druid" { + t.Fatalf("backend namespace = %s, want druid", config.Namespace) + } + return fakeBackend{name: "kubernetes"}, nil + } + newKubernetesStateStore = func(config runtimekubernetes.Config) (coreservices.RuntimeScrollStore, error) { + if config.Namespace != "druid" { + t.Fatalf("store namespace = %s, want druid", config.Namespace) + } + return fakeStore{state: "kubernetes:druid/configmaps"}, nil + } + t.Cleanup(func() { + newKubernetesBackend = previousBackend + newKubernetesStateStore = previousStore + }) + + runtime, err := NewRuntime("kubernetes", nil, stateDir, 
WithKubernetesConfig(runtimekubernetes.Config{Namespace: "druid"})) + if err != nil { + t.Fatal(err) + } + if runtime.Backend.Name() != "kubernetes" { + t.Fatalf("backend = %s, want kubernetes", runtime.Backend.Name()) + } + if runtime.Store.StateDir() != "kubernetes:druid/configmaps" { + t.Fatalf("StateDir = %s, want kubernetes:druid/configmaps", runtime.Store.StateDir()) + } + if _, err := os.Stat(filepath.Join(stateDir, "state.db")); !os.IsNotExist(err) { + t.Fatalf("state.db stat error = %v, want not exist", err) + } +} + +func TestNewRuntimeUnknownBackendErrorsOnce(t *testing.T) { + _, err := NewRuntime("nope", nil, t.TempDir()) + if err == nil || !strings.Contains(err.Error(), `unknown runtime backend "nope"`) { + t.Fatalf("error = %v", err) + } +} + +type fakeBackend struct { + name string +} + +func (f fakeBackend) Name() string { + return f.name +} + +func (f fakeBackend) ReadScrollFile(root string) ([]byte, error) { + return nil, nil +} + +func (f fakeBackend) StartDev(ctx context.Context, action ports.RuntimeDevAction) error { + return nil +} + +func (f fakeBackend) StopDev(ctx context.Context, root string) error { return nil } + +func (f fakeBackend) RunCommand(command ports.RuntimeCommand) (*int, error) { + return nil, nil +} + +func (f fakeBackend) ExpectedPorts(root string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimePortStatus, error) { + return nil, nil +} + +func (f fakeBackend) RoutingTargets(root string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimeRoutingTarget, error) { + return nil, nil +} + +func (f fakeBackend) StopRuntime(root string) error { + return nil +} + +func (f fakeBackend) DeleteRuntime(root string, purgeData bool) error { + return nil +} + +func (f fakeBackend) BackupRuntime(ctx context.Context, root string, artifact string, registryCredentials []domain.RegistryCredential) error { + return nil +} + +func (f fakeBackend) 
RestoreRuntime(ctx context.Context, root string, artifact string, registryCredentials []domain.RegistryCredential) error { + return nil +} + +func (f fakeBackend) SpawnPullWorker(ctx context.Context, action ports.RuntimeWorkerAction) error { + return nil +} + +func (f fakeBackend) Attach(commandName string, data string) error { + return nil +} + +func (f fakeBackend) Signal(commandName string, target string, signal string, root string) error { + return nil +} + +type fakeStore struct { + state string +} + +func (f fakeStore) StateDir() string { + return f.state +} + +func (f fakeStore) Root(id string) string { + return "" +} + +func (f fakeStore) CreateScroll(scroll *domain.RuntimeScroll) error { + return nil +} + +func (f fakeStore) ListScrolls() ([]*domain.RuntimeScroll, error) { + return nil, nil +} + +func (f fakeStore) GetScroll(id string) (*domain.RuntimeScroll, error) { + return nil, coreservices.ErrScrollNotFound +} + +func (f fakeStore) UpdateScroll(scroll *domain.RuntimeScroll) error { + return nil +} + +func (f fakeStore) DeleteScroll(id string) error { + return nil +} diff --git a/internal/runtime/docker/backend.go b/internal/runtime/docker/backend.go index 801b1738..ca8b1c82 100644 --- a/internal/runtime/docker/backend.go +++ b/internal/runtime/docker/backend.go @@ -1,6 +1,7 @@ package docker import ( + "archive/tar" "context" "crypto/sha1" "encoding/hex" @@ -17,9 +18,11 @@ import ( "sync" "time" + cerrdefs "github.com/containerd/errdefs" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/mount" "github.com/docker/docker/client" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/go-connections/nat" @@ -30,12 +33,62 @@ import ( type Backend struct { client *client.Client consoleManager ports.ConsoleManagerInterface + config Config mu sync.Mutex containers map[string]string stdin map[string]io.Writer } +type Config struct { + 
WorkerImage string + Network string + Storage string + BindRoot string + VolumePrefix string +} + +func (c Config) WithDefaults() Config { + if c.WorkerImage == "" { + c.WorkerImage = os.Getenv("DRUID_DOCKER_WORKER_IMAGE") + } + if c.Network == "" { + c.Network = os.Getenv("DRUID_DOCKER_NETWORK") + } + if c.Storage == "" { + c.Storage = os.Getenv("DRUID_DOCKER_STORAGE") + } + if c.Storage == "" { + c.Storage = StorageVolume + } + if c.BindRoot == "" { + c.BindRoot = os.Getenv("DRUID_DOCKER_BIND_ROOT") + } + if c.VolumePrefix == "" { + c.VolumePrefix = os.Getenv("DRUID_DOCKER_VOLUME_PREFIX") + } + if c.VolumePrefix == "" { + c.VolumePrefix = "druid" + } + return c +} + func New(consoleManager ports.ConsoleManagerInterface) (*Backend, error) { + return NewWithConfig(Config{}, consoleManager) +} + +func NewWithConfig(config Config, consoleManager ports.ConsoleManagerInterface) (*Backend, error) { + config = config.WithDefaults() + if config.Storage != StorageVolume && config.Storage != StorageBind { + return nil, fmt.Errorf("unknown docker storage %q", config.Storage) + } + if config.Storage == StorageBind { + if config.BindRoot == "" { + return nil, fmt.Errorf("docker bind root is required when docker storage is bind") + } + if !filepath.IsAbs(config.BindRoot) { + return nil, fmt.Errorf("docker bind root must be absolute: %s", config.BindRoot) + } + } cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) if err != nil { return nil, err @@ -43,6 +96,7 @@ func New(consoleManager ports.ConsoleManagerInterface) (*Backend, error) { return &Backend{ client: cli, consoleManager: consoleManager, + config: config, containers: map[string]string{}, stdin: map[string]io.Writer{}, }, nil @@ -52,38 +106,11 @@ func (b *Backend) Name() string { return "docker" } -func (b *Backend) ReadScrollFile(scrollRoot string) ([]byte, error) { - if scrollRoot == "" { - return nil, fmt.Errorf("scroll root is required") +func (b *Backend) ReadScrollFile(root 
string) ([]byte, error) { + if root == "" { + return nil, fmt.Errorf("runtime root is required") } - return os.ReadFile(filepath.Join(scrollRoot, "scroll.yaml")) -} - -func (b *Backend) ReadDataFile(_ context.Context, dataRoot string, relativePath string) ([]byte, error) { - filePath, err := dataFilePath(dataRoot, relativePath) - if err != nil { - return nil, err - } - return os.ReadFile(filePath) -} - -func (b *Backend) WriteDataFile(_ context.Context, dataRoot string, relativePath string, data []byte) error { - filePath, err := dataFilePath(dataRoot, relativePath) - if err != nil { - return err - } - if err := os.MkdirAll(filepath.Dir(filePath), 0755); err != nil { - return err - } - return os.WriteFile(filePath, data, 0644) -} - -func dataFilePath(dataRoot string, relativePath string) (string, error) { - cleaned := filepath.Clean(strings.TrimPrefix(relativePath, "/")) - if cleaned == "." || cleaned == ".." || filepath.IsAbs(cleaned) || strings.HasPrefix(cleaned, ".."+string(os.PathSeparator)) { - return "", fmt.Errorf("invalid data file path %q", relativePath) - } - return filepath.Join(dataRoot, cleaned), nil + return b.readRootFile(context.Background(), root, "scroll.yaml") } func (b *Backend) RunCommand(command ports.RuntimeCommand) (*int, error) { @@ -95,7 +122,7 @@ func (b *Backend) RunCommand(command ports.RuntimeCommand) (*int, error) { } if command.Command.Run == domain.RunModePersistent { if procedure.IsSignal() { - if err := b.Signal(procedureName, procedure.Target, procedure.Signal, command.DataRoot); err != nil { + if err := b.Signal(procedureName, procedure.Target, procedure.Signal, command.Root); err != nil { return nil, err } continue @@ -103,12 +130,12 @@ func (b *Backend) RunCommand(command ports.RuntimeCommand) (*int, error) { if procedure.Image == "" { return nil, fmt.Errorf("docker runtime procedure %s requires image", procedureName) } - if err := b.startPersistentContainer(runtimeConsoleID(command.ScrollID, procedureName), procedureName, 
procedure, command.DataRoot, command.GlobalPorts, env); err != nil { + if err := b.startPersistentContainer(runtimeConsoleID(command.ScrollID, procedureName), procedureName, procedure, command.Root, command.GlobalPorts, env); err != nil { return nil, err } continue } - exitCode, err := b.runProcedure(runtimeConsoleID(command.ScrollID, procedureName), procedureName, procedure, command.DataRoot, command.GlobalPorts, env) + exitCode, err := b.runProcedure(runtimeConsoleID(command.ScrollID, procedureName), procedureName, procedure, command.Root, command.GlobalPorts, env) if err != nil { return exitCode, err } @@ -122,17 +149,17 @@ func (b *Backend) RunCommand(command ports.RuntimeCommand) (*int, error) { return nil, nil } -func (b *Backend) runProcedure(consoleID string, procedureName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port, env map[string]string) (*int, error) { +func (b *Backend) runProcedure(consoleID string, procedureName string, procedure *domain.Procedure, root string, globalPorts []domain.Port, env map[string]string) (*int, error) { if procedure.IsSignal() { - return nil, b.Signal(procedureName, procedure.Target, procedure.Signal, dataRoot) + return nil, b.Signal(procedureName, procedure.Target, procedure.Signal, root) } if procedure.Image == "" { return nil, fmt.Errorf("docker runtime procedure %s requires image", procedureName) } - return b.runContainer(consoleID, procedureName, procedure, dataRoot, globalPorts, env) + return b.runContainer(consoleID, procedureName, procedure, root, globalPorts, env) } -func (b *Backend) ExpectedPorts(dataRoot string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimePortStatus, error) { +func (b *Backend) ExpectedPorts(root string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimePortStatus, error) { statuses := []domain.RuntimePortStatus{} portsByName := portsByName(globalPorts) for commandName, 
command := range commands { @@ -147,7 +174,7 @@ func (b *Backend) ExpectedPorts(dataRoot string, commands map[string]*domain.Com if procedure.Id != nil { procedureName = *procedure.Id } - containerStatuses, err := b.expectedPortsForProcedure(dataRoot, procedureName, procedure, portsByName) + containerStatuses, err := b.expectedPortsForProcedure(root, procedureName, procedure, portsByName) if err != nil { return nil, err } @@ -163,6 +190,153 @@ func (b *Backend) ExpectedPorts(dataRoot string, commands map[string]*domain.Com return statuses, nil } +func (b *Backend) RoutingTargets(root string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimeRoutingTarget, error) { + portsByName := portsByName(globalPorts) + targets := []domain.RuntimeRoutingTarget{{ + Name: "webdav", + Procedure: "dev", + PortName: "webdav", + Port: 8084, + Protocol: "https", + ServiceName: ContainerName(root, "dev"), + ServicePort: 8084, + }} + seen := map[string]struct{}{"webdav": {}} + commandNames := make([]string, 0, len(commands)) + for commandName := range commands { + commandNames = append(commandNames, commandName) + } + sort.Strings(commandNames) + for _, commandName := range commandNames { + command := commands[commandName] + if command == nil { + continue + } + for idx, procedure := range command.Procedures { + if procedure == nil || len(procedure.ExpectedPorts) == 0 { + continue + } + procedureName := domain.ProcedureName(commandName, idx, procedure) + for _, expectedPort := range procedure.ExpectedPorts { + if _, ok := seen[expectedPort.Name]; ok { + continue + } + port, ok := portsByName[expectedPort.Name] + if !ok { + return nil, fmt.Errorf("expected port %s is not defined in top-level ports", expectedPort.Name) + } + seen[expectedPort.Name] = struct{}{} + targets = append(targets, domain.RuntimeRoutingTarget{ + Name: expectedPort.Name, + Procedure: procedureName, + PortName: expectedPort.Name, + Port: port.Port, + Protocol: 
normalizeProtocol(port.Protocol), + ServiceName: ContainerName(root, procedureName), + ServicePort: port.Port, + }) + } + } + } + sort.Slice(targets, func(i, j int) bool { return targets[i].Name < targets[j].Name }) + return targets, nil +} + +func (b *Backend) StartDev(ctx context.Context, action ports.RuntimeDevAction) error { + if b.config.WorkerImage == "" { + return fmt.Errorf("docker dev requires --docker-worker-image or DRUID_DOCKER_WORKER_IMAGE") + } + if action.RootRef == "" { + return fmt.Errorf("dev root ref is required") + } + if action.MountPath == "" { + action.MountPath = "/scroll" + } + if action.Listen == "" { + action.Listen = ":8084" + } + if err := b.pullImage(ctx, b.config.WorkerImage); err != nil { + return err + } + rootMount, err := DockerMount(action.RootRef, action.MountPath, false, "") + if err != nil { + return err + } + args := []string{ + "dev", + "--root", action.MountPath, + "--listen", action.Listen, + "--runtime-id", action.RuntimeID, + "--daemon-url", action.DaemonURL, + } + if action.DaemonToken != "" { + args = append(args, "--daemon-token", action.DaemonToken) + } + if action.OwnerID != "" { + args = append(args, "--owner-id", action.OwnerID) + } + if action.AuthJWKSURL != "" { + args = append(args, "--auth-jwks-url", action.AuthJWKSURL) + } + if action.RuntimeJWKSURL != "" { + args = append(args, "--runtime-jwks-url", action.RuntimeJWKSURL) + } + for _, path := range action.WatchPaths { + args = append(args, "--watch", path) + } + for _, command := range action.HotReloadCommands { + args = append(args, "--command", command) + } + hostConfig := &container.HostConfig{Mounts: []mount.Mount{rootMount}} + for _, assignment := range action.Routing { + if assignment.PublicPort == 0 || (assignment.PortName != "webdav" && assignment.Name != "webdav") { + continue + } + hostConfig.PortBindings = nat.PortMap{ + "8084/tcp": []nat.PortBinding{{ + HostIP: assignment.ExternalIP, + HostPort: fmt.Sprintf("%d", assignment.PublicPort), + }}, + } 
+ break + } + if b.config.Network != "" { + hostConfig.NetworkMode = container.NetworkMode(b.config.Network) + } + name := ContainerName(action.RootRef, "dev") + _ = b.client.ContainerRemove(ctx, name, container.RemoveOptions{Force: true}) + created, err := b.client.ContainerCreate(ctx, &container.Config{ + Image: b.config.WorkerImage, + Entrypoint: []string{"druid"}, + Cmd: args, + ExposedPorts: nat.PortSet{"8084/tcp": struct{}{}}, + Labels: map[string]string{ + "druid.command": "dev", + "druid.runtime-id": action.RuntimeID, + "druid.root-hash": rootHash(action.RootRef), + }, + }, hostConfig, nil, nil, name) + if err != nil { + return err + } + if err := b.client.ContainerStart(ctx, created.ID, container.StartOptions{}); err != nil { + _ = b.client.ContainerRemove(context.Background(), created.ID, container.RemoveOptions{Force: true}) + return err + } + return nil +} + +func (b *Backend) StopDev(ctx context.Context, root string) error { + if root == "" { + return fmt.Errorf("runtime root is required") + } + err := b.client.ContainerRemove(ctx, ContainerName(root, "dev"), container.RemoveOptions{Force: true}) + if err != nil && !cerrdefs.IsNotFound(err) { + return err + } + return nil +} + func (b *Backend) Attach(commandName string, data string) error { b.mu.Lock() stdin := b.stdin[commandName] @@ -174,12 +348,12 @@ func (b *Backend) Attach(commandName string, data string) error { return err } -func (b *Backend) Signal(_ string, target string, signal string, dataRoot string) error { +func (b *Backend) Signal(_ string, target string, signal string, root string) error { if target == "" { return nil } ctx := context.Background() - containerID := b.containerID(target, dataRoot) + containerID := b.containerID(target, root) options := container.StopOptions{} if signal != "" { options.Signal = signal @@ -187,14 +361,14 @@ func (b *Backend) Signal(_ string, target string, signal string, dataRoot string return b.client.ContainerStop(ctx, containerID, options) } -func (b 
*Backend) StopRuntime(dataRoot string) error { - if dataRoot == "" { - return fmt.Errorf("data root is required") +func (b *Backend) StopRuntime(root string) error { + if root == "" { + return fmt.Errorf("runtime root is required") } ctx := context.Background() items, err := b.client.ContainerList(ctx, container.ListOptions{ All: true, - Filters: filters.NewArgs(filters.Arg("label", "druid.data-root-hash="+dataRootHash(dataRoot))), + Filters: filters.NewArgs(filters.Arg("label", "druid.root-hash="+rootHash(root))), }) if err != nil { return err @@ -215,34 +389,390 @@ func (b *Backend) StopRuntime(dataRoot string) error { return nil } -func (b *Backend) DeleteRuntime(dataRoot string, purgeData bool) error { - if err := b.StopRuntime(dataRoot); err != nil { +func (b *Backend) DeleteRuntime(root string, purgeData bool) error { + if err := b.StopRuntime(root); err != nil { return err } if purgeData { - return os.RemoveAll(dataRoot) + ref, err := ParseRootRef(root) + if err != nil { + return err + } + if ref.Kind == StorageVolume { + return b.client.VolumeRemove(context.Background(), ref.Source, true) + } + return b.emptyRoot(context.Background(), root) } return nil } -func (b *Backend) runContainer(consoleID string, commandName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port, env map[string]string) (*int, error) { - ctx := context.Background() - if err := os.MkdirAll(filepath.Join(dataRoot, domain.RuntimeDataDir), 0755); err != nil { - return nil, err +func (b *Backend) BackupRuntime(ctx context.Context, root string, artifact string, registryCredentials []domain.RegistryCredential) error { + if artifact == "" { + return fmt.Errorf("backup artifact is required") + } + return b.runWorkerRootCommand(ctx, root, []string{ + "worker", "push", + "--artifact", artifact, + "--root", "/scroll", + }, registryCredentials) +} + +func (b *Backend) RestoreRuntime(ctx context.Context, root string, artifact string, registryCredentials 
[]domain.RegistryCredential) error { + if artifact == "" { + return fmt.Errorf("restore artifact is required") + } + if err := b.StopRuntime(root); err != nil { + return err + } + return b.runWorkerRootCommand(ctx, root, []string{ + "worker", "pull", + "--artifact", artifact, + "--runtime-id", rootHash(root), + "--mode", string(ports.RuntimeWorkerModeCreate), + "--root", "/scroll", + }, registryCredentials) +} + +func (b *Backend) readRootFile(ctx context.Context, root string, relativePath string) ([]byte, error) { + var data []byte + err := b.withHelperContainer(ctx, root, func(containerID string) error { + reader, _, err := b.client.CopyFromContainer(ctx, containerID, "/scroll/"+relativePath) + if err != nil { + return err + } + defer reader.Close() + tarReader := tar.NewReader(reader) + for { + header, err := tarReader.Next() + if err != nil { + if errors.Is(err, io.EOF) { + return fmt.Errorf("file %s not found in root", relativePath) + } + return err + } + if header.Typeflag == tar.TypeReg { + data, err = io.ReadAll(tarReader) + return err + } + } + }) + return data, err +} + +func (b *Backend) emptyRoot(ctx context.Context, root string) error { + return b.withHelperContainer(ctx, root, func(containerID string) error { + return b.runContainerCommand(ctx, containerID, []string{"sh", "-c", "find /scroll -mindepth 1 -maxdepth 1 -exec rm -rf {} +"}) + }) +} + +func (b *Backend) runWorkerRootCommand(ctx context.Context, root string, command []string, registryCredentials []domain.RegistryCredential) error { + if b.config.WorkerImage == "" { + return fmt.Errorf("docker worker image is required; set --docker-worker-image or DRUID_DOCKER_WORKER_IMAGE") + } + rootMount, err := DockerMount(root, "/scroll", false, "") + if err != nil { + return err + } + if err := b.pullImage(ctx, b.config.WorkerImage); err != nil { + return err + } + registryConfig, err := json.Marshal(struct { + Registries []domain.RegistryCredential `json:"registries"` + }{Registries: 
registryCredentials}) + if err != nil { + return err + } + hostConfig := &container.HostConfig{Mounts: []mount.Mount{rootMount}} + if b.config.Network != "" { + hostConfig.NetworkMode = container.NetworkMode(b.config.Network) + } + name := fmt.Sprintf("druid-worker-%s-%d", rootHash(root), time.Now().UnixNano()) + created, err := b.client.ContainerCreate(ctx, &container.Config{ + Image: b.config.WorkerImage, + Entrypoint: []string{"druid"}, + Cmd: command, + Env: dockerWorkerEnv([]string{ + "DRUID_RUNTIME_REGISTRY_CONFIG_JSON=" + string(registryConfig), + }), + Labels: map[string]string{ + "druid.worker": "root", + "druid.root-hash": rootHash(root), + }, + }, hostConfig, nil, nil, name) + if err != nil { + return err + } + defer b.client.ContainerRemove(context.Background(), created.ID, container.RemoveOptions{Force: true}) + if err := b.client.ContainerStart(ctx, created.ID, container.StartOptions{}); err != nil { + return err + } + statusCh, errCh := b.client.ContainerWait(ctx, created.ID, container.WaitConditionNotRunning) + select { + case err := <-errCh: + if err != nil { + return err + } + case status := <-statusCh: + if status.StatusCode != 0 { + logs, _ := b.client.ContainerLogs(context.Background(), created.ID, container.LogsOptions{ShowStdout: true, ShowStderr: true}) + defer func() { + if logs != nil { + logs.Close() + } + }() + var message strings.Builder + if logs != nil { + _, _ = io.Copy(&message, logs) + } + return fmt.Errorf("worker container exited with %d: %s", status.StatusCode, strings.TrimSpace(message.String())) + } + } + return nil +} + +func (b *Backend) ensureProcedureMountPaths(ctx context.Context, root string, mounts []domain.Mount) error { + if len(mounts) == 0 { + return nil + } + ref, err := ParseRootRef(root) + if err != nil { + return err + } + if ref.Kind == StorageBind { + return nil + } + paths := make([]string, 0, len(mounts)) + for _, mount := range mounts { + cleaned, err := cleanRootSubPath(procedureDataSubPath(mount.SubPath)) 
// dockerAPIVersionAtLeast reports whether a Docker API version string
// ("major.minor", optionally with further dot-separated components) is at
// least wantMajor.wantMinor. Malformed or incomplete versions ("", "1",
// "x.y") report false so callers fail closed.
func dockerAPIVersionAtLeast(version string, wantMajor int, wantMinor int) bool {
	majorText, rest, ok := strings.Cut(version, ".")
	if !ok {
		return false
	}
	// Only the minor component matters; tolerate trailing ".patch" parts
	// (previously "1.45.1" failed Atoi and was wrongly reported as too old).
	minorText, _, _ := strings.Cut(rest, ".")
	major, err := strconv.Atoi(majorText)
	if err != nil {
		return false
	}
	minor, err := strconv.Atoi(minorText)
	if err != nil {
		return false
	}
	if major != wantMajor {
		return major > wantMajor
	}
	return minor >= wantMinor
}
b.config.WorkerImage, + Entrypoint: []string{"/bin/sh", "-c"}, + Cmd: []string{"sleep 300"}, + Labels: map[string]string{ + "druid.helper": "root", + "druid.root-hash": rootHash(root), + }, + }, hostConfig, nil, nil, name) + if err != nil { + return err + } + defer b.client.ContainerRemove(context.Background(), created.ID, container.RemoveOptions{Force: true}) + if err := b.client.ContainerStart(ctx, created.ID, container.StartOptions{}); err != nil { + return err + } + return fn(created.ID) +} + +func (b *Backend) runContainerCommand(ctx context.Context, containerID string, command []string) error { + execID, err := b.client.ContainerExecCreate(ctx, containerID, container.ExecOptions{ + Cmd: command, + AttachStdout: true, + AttachStderr: true, + }) + if err != nil { + return err + } + attach, err := b.client.ContainerExecAttach(ctx, execID.ID, container.ExecAttachOptions{}) + if err != nil { + return err + } + var output strings.Builder + _, _ = io.Copy(&output, attach.Reader) + attach.Close() + inspect, err := b.client.ContainerExecInspect(ctx, execID.ID) + if err != nil { + return err } + if inspect.ExitCode != 0 { + return fmt.Errorf("helper command exited with %d: %s", inspect.ExitCode, strings.TrimSpace(output.String())) + } + return nil +} + +func (b *Backend) SpawnPullWorker(ctx context.Context, action ports.RuntimeWorkerAction) error { + if b.config.WorkerImage == "" { + return fmt.Errorf("docker worker image is required; set --docker-worker-image or DRUID_DOCKER_WORKER_IMAGE") + } + root := action.RootRef + if root == "" { + return fmt.Errorf("worker root ref is required") + } + if action.MountPath == "" { + action.MountPath = "/scroll" + } + if err := b.pullImage(ctx, b.config.WorkerImage); err != nil { + return err + } + registryConfig, err := json.Marshal(struct { + Registries []domain.RegistryCredential `json:"registries"` + }{Registries: action.RegistryCredentials}) + if err != nil { + return err + } + rootMount, err := DockerMount(root, 
action.MountPath, false, "") + if err != nil { + return err + } + artifact := action.Artifact + mounts := []mount.Mount{rootMount} + if info, statErr := os.Stat(action.Artifact); statErr == nil { + abs, err := filepath.Abs(action.Artifact) + if err != nil { + return err + } + if info.IsDir() { + mounts = append(mounts, mount.Mount{Type: mount.TypeBind, Source: abs, Target: "/artifact-src", ReadOnly: true}) + artifact = "/artifact-src" + } else { + mounts = append(mounts, mount.Mount{Type: mount.TypeBind, Source: filepath.Dir(abs), Target: "/artifact-src", ReadOnly: true}) + artifact = "/artifact-src/" + filepath.Base(abs) + } + } + hostConfig := &container.HostConfig{Mounts: mounts} + if b.config.Network != "" { + hostConfig.NetworkMode = container.NetworkMode(b.config.Network) + } + name := fmt.Sprintf("druid-worker-%s-%s", rootHash(root), rootHash(string(action.Mode)+action.Artifact)) + _ = b.client.ContainerRemove(ctx, name, container.RemoveOptions{Force: true}) + created, err := b.client.ContainerCreate(ctx, &container.Config{ + Image: b.config.WorkerImage, + Entrypoint: []string{"druid"}, + Cmd: []string{ + "worker", "pull", + "--artifact", artifact, + "--runtime-id", action.RuntimeID, + "--mode", string(action.Mode), + "--root", action.MountPath, + "--callback-url", action.CallbackURL, + }, + Env: dockerWorkerEnv([]string{ + "DRUID_WORKER_TOKEN=" + action.CallbackToken, + "DRUID_RUNTIME_REGISTRY_CONFIG_JSON=" + string(registryConfig), + }), + Labels: map[string]string{ + "druid.worker": "pull", + "druid.runtime-id": action.RuntimeID, + "druid.root-hash": rootHash(root), + }, + }, hostConfig, nil, nil, name) + if err != nil { + return err + } + defer b.client.ContainerRemove(context.Background(), created.ID, container.RemoveOptions{Force: true}) + if err := b.client.ContainerStart(ctx, created.ID, container.StartOptions{}); err != nil { + return err + } + statusCh, errCh := b.client.ContainerWait(ctx, created.ID, container.WaitConditionNotRunning) + select { + 
case err := <-errCh: + if err != nil { + return err + } + case status := <-statusCh: + if status.StatusCode != 0 { + logs, _ := b.client.ContainerLogs(context.Background(), created.ID, container.LogsOptions{ShowStdout: true, ShowStderr: true}) + defer func() { + if logs != nil { + logs.Close() + } + }() + var message strings.Builder + if logs != nil { + _, _ = io.Copy(&message, logs) + } + return fmt.Errorf("worker container exited with %d: %s", status.StatusCode, strings.TrimSpace(message.String())) + } + } + return nil +} + +func (b *Backend) runContainer(consoleID string, commandName string, procedure *domain.Procedure, root string, globalPorts []domain.Port, env map[string]string) (*int, error) { + ctx := context.Background() if procedure.Image == "" { return nil, errors.New("docker image is required") } + if err := b.ensureVolumeSubpathSupport(ctx, root, procedure.Mounts); err != nil { + return nil, err + } + if err := b.ensureProcedureMountPaths(ctx, root, procedure.Mounts); err != nil { + return nil, err + } if err := b.pullImage(ctx, procedure.Image); err != nil { return nil, err } - config, hostConfig, err := containerSpec(commandName, procedure, dataRoot, globalPorts, env) + config, hostConfig, err := containerSpec(commandName, procedure, root, globalPorts, env) if err != nil { return nil, err } - containerName := ContainerName(dataRoot, commandName) + containerName := ContainerName(root, commandName) _ = b.client.ContainerRemove(ctx, containerName, container.RemoveOptions{Force: true}) created, err := b.client.ContainerCreate(ctx, config, hostConfig, nil, nil, containerName) @@ -312,22 +842,25 @@ func (b *Backend) runContainer(consoleID string, commandName string, procedure * return &exitCode, nil } -func (b *Backend) startPersistentContainer(consoleID string, commandName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port, env map[string]string) error { +func (b *Backend) startPersistentContainer(consoleID string, commandName 
string, procedure *domain.Procedure, root string, globalPorts []domain.Port, env map[string]string) error { ctx := context.Background() - if err := os.MkdirAll(filepath.Join(dataRoot, domain.RuntimeDataDir), 0755); err != nil { - return err - } if procedure.Image == "" { return errors.New("docker image is required") } + if err := b.ensureVolumeSubpathSupport(ctx, root, procedure.Mounts); err != nil { + return err + } + if err := b.ensureProcedureMountPaths(ctx, root, procedure.Mounts); err != nil { + return err + } if err := b.pullImage(ctx, procedure.Image); err != nil { return err } - config, hostConfig, err := containerSpec(commandName, procedure, dataRoot, globalPorts, env) + config, hostConfig, err := containerSpec(commandName, procedure, root, globalPorts, env) if err != nil { return err } - containerName := ContainerName(dataRoot, commandName) + containerName := ContainerName(root, commandName) _ = b.client.ContainerRemove(ctx, containerName, container.RemoveOptions{Force: true}) created, err := b.client.ContainerCreate(ctx, config, hostConfig, nil, nil, containerName) if err != nil { @@ -390,6 +923,11 @@ func (b *Backend) startPersistentContainer(consoleID string, commandName string, } func (b *Backend) pullImage(ctx context.Context, imageRef string) error { + if _, err := b.client.ImageInspect(ctx, imageRef); err == nil { + return nil + } else if !cerrdefs.IsNotFound(err) { + return err + } reader, err := b.client.ImagePull(ctx, imageRef, image.PullOptions{}) if err != nil { return err @@ -399,13 +937,13 @@ func (b *Backend) pullImage(ctx context.Context, imageRef string) error { return nil } -func (b *Backend) containerID(commandName string, dataRoot string) string { +func (b *Backend) containerID(commandName string, root string) string { b.mu.Lock() defer b.mu.Unlock() if id := b.containers[commandName]; id != "" { return id } - return ContainerName(dataRoot, commandName) + return ContainerName(root, commandName) } func (b *Backend) 
setContainer(commandName string, id string) { @@ -443,14 +981,10 @@ func (w channelWriter) Write(p []byte) (int, error) { return len(p), nil } -func containerSpec(commandName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port, env map[string]string) (*container.Config, *container.HostConfig, error) { +func containerSpec(commandName string, procedure *domain.Procedure, root string, globalPorts []domain.Port, env map[string]string) (*container.Config, *container.HostConfig, error) { if procedure.Image == "" { return nil, nil, errors.New("docker image is required") } - runtimeDataRoot := filepath.Join(dataRoot, domain.RuntimeDataDir) - if err := os.MkdirAll(runtimeDataRoot, 0755); err != nil { - return nil, nil, err - } exposedPorts := nat.PortSet{} portBindings := nat.PortMap{} @@ -468,24 +1002,16 @@ func containerSpec(commandName string, procedure *domain.Procedure, dataRoot str portBindings[dockerPort] = []nat.PortBinding{{HostPort: fmt.Sprintf("%d", port.Port)}} } - binds := []string{} + mounts := []mount.Mount{} for _, mount := range procedure.Mounts { if mount.Path == "" { return nil, nil, fmt.Errorf("mount path is required") } - subPath := mount.SubPath - if subPath == "" { - subPath = "." 
- } - hostPath := filepath.Join(runtimeDataRoot, filepath.FromSlash(subPath)) - if err := os.MkdirAll(hostPath, 0755); err != nil { + dockerMount, err := DockerMount(root, mount.Path, mount.ReadOnly, procedureDataSubPath(mount.SubPath)) + if err != nil { return nil, nil, err } - bind := fmt.Sprintf("%s:%s", hostPath, mount.Path) - if mount.ReadOnly { - bind += ":ro" - } - binds = append(binds, bind) + mounts = append(mounts, dockerMount) } return &container.Config{ @@ -500,23 +1026,37 @@ func containerSpec(commandName string, procedure *domain.Procedure, dataRoot str OpenStdin: true, Tty: procedure.TTY, Labels: map[string]string{ - "druid.command": commandName, - "druid.data-root-hash": dataRootHash(dataRoot), + "druid.command": commandName, + "druid.root-hash": rootHash(root), }, }, &container.HostConfig{ - Binds: binds, + Mounts: mounts, PortBindings: portBindings, }, nil } -func ContainerName(scrollRoot string, commandName string) string { - hash := sha1.Sum([]byte(scrollRoot)) +func procedureDataSubPath(subPath string) string { + clean := filepath.ToSlash(filepath.Clean(strings.TrimPrefix(subPath, "/"))) + if subPath == "" { + return domain.RuntimeDataDir + } + if clean == "." { + return "." 
+ } + if clean == domain.RuntimeDataDir || strings.HasPrefix(clean, domain.RuntimeDataDir+"/") { + return clean + } + return filepath.ToSlash(filepath.Join(domain.RuntimeDataDir, filepath.FromSlash(clean))) +} + +func ContainerName(root string, commandName string) string { + hash := sha1.Sum([]byte(root)) name := sanitizeContainerName(commandName) return fmt.Sprintf("druid-%s-%s", hex.EncodeToString(hash[:])[:10], name) } -func dataRootHash(dataRoot string) string { - hash := sha1.Sum([]byte(dataRoot)) +func rootHash(root string) string { + hash := sha1.Sum([]byte(root)) return hex.EncodeToString(hash[:])[:10] } @@ -553,22 +1093,29 @@ func envArgs(env map[string]string) []string { return args } +func dockerWorkerEnv(base []string) []string { + if plainHTTP := os.Getenv("DRUID_REGISTRY_PLAIN_HTTP"); plainHTTP != "" { + base = append(base, "DRUID_REGISTRY_PLAIN_HTTP="+plainHTTP) + } + return base +} + type ContainerSpec struct { Image string Command []string WorkingDir string Env []string - Binds []string + Mounts []mount.Mount PortBindings nat.PortMap TTY bool } -func BuildContainerSpec(commandName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port) (*ContainerSpec, error) { - return BuildContainerSpecWithEnv(commandName, procedure, dataRoot, globalPorts, procedure.Env) +func BuildContainerSpec(commandName string, procedure *domain.Procedure, root string, globalPorts []domain.Port) (*ContainerSpec, error) { + return BuildContainerSpecWithEnv(commandName, procedure, root, globalPorts, procedure.Env) } -func BuildContainerSpecWithEnv(commandName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port, env map[string]string) (*ContainerSpec, error) { - config, hostConfig, err := containerSpec(commandName, procedure, dataRoot, globalPorts, env) +func BuildContainerSpecWithEnv(commandName string, procedure *domain.Procedure, root string, globalPorts []domain.Port, env map[string]string) (*ContainerSpec, error) { + 
config, hostConfig, err := containerSpec(commandName, procedure, root, globalPorts, env) if err != nil { return nil, err } @@ -577,7 +1124,7 @@ func BuildContainerSpecWithEnv(commandName string, procedure *domain.Procedure, Command: config.Cmd, WorkingDir: config.WorkingDir, Env: config.Env, - Binds: hostConfig.Binds, + Mounts: hostConfig.Mounts, PortBindings: hostConfig.PortBindings, TTY: config.Tty, }, nil @@ -663,13 +1210,13 @@ func (t containerTraffic) rxDelta(window time.Duration, now time.Time) uint64 { return t.rxBytes - base.rx } -func (b *Backend) expectedPortsForProcedure(dataRoot string, procedureName string, procedure *domain.Procedure, ports map[string]domain.Port) ([]domain.RuntimePortStatus, error) { +func (b *Backend) expectedPortsForProcedure(root string, procedureName string, procedure *domain.Procedure, ports map[string]domain.Port) ([]domain.RuntimePortStatus, error) { statuses := make([]domain.RuntimePortStatus, 0, len(procedure.ExpectedPorts)) - containerName := ContainerName(dataRoot, procedureName) + containerName := ContainerName(root, procedureName) ctx := context.Background() inspected, err := b.client.ContainerInspect(ctx, containerName) containerFound := err == nil - if err != nil && !client.IsErrNotFound(err) { + if err != nil && !cerrdefs.IsNotFound(err) { return nil, err } diff --git a/internal/runtime/docker/storage.go b/internal/runtime/docker/storage.go new file mode 100644 index 00000000..05354091 --- /dev/null +++ b/internal/runtime/docker/storage.go @@ -0,0 +1,135 @@ +package docker + +import ( + "fmt" + "path/filepath" + "regexp" + "strings" + + "github.com/docker/docker/api/types/mount" +) + +const ( + StorageVolume = "volume" + StorageBind = "bind" + + volumeRootPrefix = "docker-volume://" + bindRootPrefix = "docker-bind://" +) + +type RootRef struct { + Kind string + Source string +} + +func (c Config) RuntimeRootRef(id string) (string, error) { + c = c.WithDefaults() + name := sanitizeVolumePart(id) + if name == "" { + 
return "", fmt.Errorf("runtime id is required") + } + switch c.Storage { + case StorageVolume: + return volumeRootPrefix + sanitizeVolumePart(c.VolumePrefix+"-"+name+"-data"), nil + case StorageBind: + if c.BindRoot == "" { + return "", fmt.Errorf("docker bind root is required when docker storage is bind") + } + if !filepath.IsAbs(c.BindRoot) { + return "", fmt.Errorf("docker bind root must be absolute: %s", c.BindRoot) + } + return bindRootPrefix + filepath.Join(c.BindRoot, name), nil + default: + return "", fmt.Errorf("unknown docker storage %q", c.Storage) + } +} + +func ParseRootRef(root string) (RootRef, error) { + if root == "" { + return RootRef{}, fmt.Errorf("root ref is required") + } + if strings.HasPrefix(root, volumeRootPrefix) { + name := strings.TrimPrefix(root, volumeRootPrefix) + if name == "" || strings.Contains(name, "/") { + return RootRef{}, fmt.Errorf("invalid docker volume root ref %q", root) + } + return RootRef{Kind: StorageVolume, Source: name}, nil + } + if strings.HasPrefix(root, bindRootPrefix) { + path := strings.TrimPrefix(root, bindRootPrefix) + if !filepath.IsAbs(path) { + return RootRef{}, fmt.Errorf("docker bind root must be absolute: %s", path) + } + return RootRef{Kind: StorageBind, Source: filepath.Clean(path)}, nil + } + if filepath.IsAbs(root) { + return RootRef{Kind: StorageBind, Source: filepath.Clean(root)}, nil + } + return RootRef{}, fmt.Errorf("unsupported docker root ref %q", root) +} + +func DockerMount(root string, target string, readOnly bool, subPath string) (mount.Mount, error) { + ref, err := ParseRootRef(root) + if err != nil { + return mount.Mount{}, err + } + if target == "" { + return mount.Mount{}, fmt.Errorf("mount target is required") + } + cleanSubPath, err := cleanRootSubPath(subPath) + if err != nil { + return mount.Mount{}, err + } + switch ref.Kind { + case StorageVolume: + result := mount.Mount{ + Type: mount.TypeVolume, + Source: ref.Source, + Target: target, + ReadOnly: readOnly, + } + if 
cleanSubPath != "" { + result.VolumeOptions = &mount.VolumeOptions{Subpath: cleanSubPath} + } + return result, nil + case StorageBind: + source := ref.Source + if cleanSubPath != "" { + source = filepath.Join(source, filepath.FromSlash(cleanSubPath)) + } + return mount.Mount{ + Type: mount.TypeBind, + Source: source, + Target: target, + ReadOnly: readOnly, + BindOptions: &mount.BindOptions{CreateMountpoint: true}, + }, nil + default: + return mount.Mount{}, fmt.Errorf("unsupported docker root kind %q", ref.Kind) + } +} + +func CleanContainerRelativePath(relativePath string) (string, error) { + cleaned := filepath.ToSlash(filepath.Clean(strings.TrimPrefix(relativePath, "/"))) + if cleaned == "." || cleaned == ".." || strings.HasPrefix(cleaned, "../") { + return "", fmt.Errorf("invalid data file path %q", relativePath) + } + return cleaned, nil +} + +func cleanRootSubPath(subPath string) (string, error) { + if subPath == "" || subPath == "." { + return "", nil + } + cleaned := filepath.ToSlash(filepath.Clean(strings.TrimPrefix(subPath, "/"))) + if cleaned == "." || cleaned == ".." 
|| strings.HasPrefix(cleaned, "../") { + return "", fmt.Errorf("invalid docker mount subpath %q", subPath) + } + return cleaned, nil +} + +func sanitizeVolumePart(value string) string { + re := regexp.MustCompile(`[^a-zA-Z0-9_.-]+`) + value = re.ReplaceAllString(value, "-") + return strings.Trim(value, "-_.") +} diff --git a/internal/runtime/docker/storage_test.go b/internal/runtime/docker/storage_test.go new file mode 100644 index 00000000..4cad7f6e --- /dev/null +++ b/internal/runtime/docker/storage_test.go @@ -0,0 +1,109 @@ +package docker + +import ( + "path/filepath" + "reflect" + "testing" + + "github.com/docker/docker/api/types/mount" +) + +func TestConfigWithDefaultsUsesDockerVolumeStorage(t *testing.T) { + config := Config{}.WithDefaults() + if config.Storage != StorageVolume { + t.Fatalf("storage = %s, want volume", config.Storage) + } + if config.VolumePrefix != "druid" { + t.Fatalf("volume prefix = %s, want druid", config.VolumePrefix) + } +} + +func TestRuntimeRootRefUsesVolumeByDefault(t *testing.T) { + root, err := (Config{VolumePrefix: "lab"}).RuntimeRootRef("scroll a") + if err != nil { + t.Fatal(err) + } + if root != "docker-volume://lab-scroll-a-data" { + t.Fatalf("root = %s", root) + } +} + +func TestRuntimeRootRefUsesBindRoot(t *testing.T) { + bindRoot := filepath.Join(t.TempDir(), "roots") + root, err := (Config{Storage: StorageBind, BindRoot: bindRoot}).RuntimeRootRef("scroll-a") + if err != nil { + t.Fatal(err) + } + if root != "docker-bind://"+filepath.Join(bindRoot, "scroll-a") { + t.Fatalf("root = %s", root) + } +} + +func TestParseRootRefSupportsVolumeBindAndLocalBindPath(t *testing.T) { + cases := map[string]RootRef{ + "docker-volume://druid-scroll-data": {Kind: StorageVolume, Source: "druid-scroll-data"}, + "docker-bind:///tmp/druid/scroll": {Kind: StorageBind, Source: "/tmp/druid/scroll"}, + "/tmp/druid/local": {Kind: StorageBind, Source: "/tmp/druid/local"}, + } + for input, want := range cases { + got, err := ParseRootRef(input) + 
if err != nil { + t.Fatalf("ParseRootRef(%q): %v", input, err) + } + if !reflect.DeepEqual(got, want) { + t.Fatalf("ParseRootRef(%q) = %#v, want %#v", input, got, want) + } + } +} + +func TestDockerMountUsesVolumeSubpath(t *testing.T) { + got, err := DockerMount("docker-volume://druid-scroll-data", "/site", true, "data/site") + if err != nil { + t.Fatal(err) + } + want := mount.Mount{ + Type: mount.TypeVolume, + Source: "druid-scroll-data", + Target: "/site", + ReadOnly: true, + VolumeOptions: &mount.VolumeOptions{Subpath: "data/site"}, + } + if !reflect.DeepEqual(got, want) { + t.Fatalf("mount = %#v, want %#v", got, want) + } +} + +func TestDockerMountUsesBindSubpath(t *testing.T) { + got, err := DockerMount("docker-bind:///tmp/druid/scroll", "/site", false, "data/site") + if err != nil { + t.Fatal(err) + } + want := mount.Mount{ + Type: mount.TypeBind, + Source: "/tmp/druid/scroll/data/site", + Target: "/site", + BindOptions: &mount.BindOptions{CreateMountpoint: true}, + } + if !reflect.DeepEqual(got, want) { + t.Fatalf("mount = %#v, want %#v", got, want) + } +} + +func TestDockerMountDotSubpathMountsRuntimeRoot(t *testing.T) { + got, err := DockerMount("docker-volume://druid-scroll-data", "/runtime", false, procedureDataSubPath(".")) + if err != nil { + t.Fatal(err) + } + if got.VolumeOptions != nil { + t.Fatalf("dot subpath should mount the volume root, got %#v", got.VolumeOptions) + } +} + +func TestCleanContainerRelativePathRejectsTraversal(t *testing.T) { + if _, err := CleanContainerRelativePath("../escape"); err == nil { + t.Fatal("expected traversal error") + } + if got, err := CleanContainerRelativePath("/data/file.txt"); err != nil || got != "data/file.txt" { + t.Fatalf("cleaned = %s err=%v", got, err) + } +} diff --git a/internal/runtime/kubernetes/backend.go b/internal/runtime/kubernetes/backend.go index 709fc25a..38600ee5 100644 --- a/internal/runtime/kubernetes/backend.go +++ b/internal/runtime/kubernetes/backend.go @@ -3,11 +3,11 @@ package 
kubernetes import ( "bufio" "context" - "encoding/base64" + "encoding/json" "errors" "fmt" "io" - "path" + "sort" "strings" "time" @@ -18,21 +18,24 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" k8sclient "k8s.io/client-go/kubernetes" + k8sscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/remotecommand" "github.com/highcard-dev/daemon/internal/core/domain" "github.com/highcard-dev/daemon/internal/core/ports" - coreservices "github.com/highcard-dev/daemon/internal/core/services" "github.com/highcard-dev/daemon/internal/utils/logger" "go.uber.org/zap" ) type Backend struct { client k8sclient.Interface + restConfig *rest.Config consoleManager ports.ConsoleManagerInterface config Config hubble HubbleClient + jobLogRunner func(context.Context, *batchv1.Job) ([]byte, error) } func New(config Config, consoleManager ports.ConsoleManagerInterface) (*Backend, error) { @@ -54,9 +57,10 @@ func New(config Config, consoleManager ports.ConsoleManagerInterface) (*Backend, if _, err := client.Discovery().ServerVersion(); err != nil { return nil, fmt.Errorf("kubernetes API unavailable: %w", err) } - logger.Log().Info("Using Kubernetes runtime config", zap.String("source", source), zap.String("namespace", config.Namespace)) + logger.Log().Info("Using Kubernetes backend settings", zap.String("source", source), zap.String("namespace", config.Namespace)) backend := &Backend{ client: client, + restConfig: restConfig, consoleManager: consoleManager, config: config, hubble: NewHubbleRelayClient(config.HubbleRelayAddr), @@ -131,88 +135,103 @@ func (b *Backend) Name() string { return "kubernetes" } -func (b *Backend) MaterializeScroll(ctx context.Context, artifact string, requestedName string) (*ports.RuntimeMaterialization, error) { +func (b *Backend) SpawnPullWorker(ctx context.Context, action ports.RuntimeWorkerAction) error { if err := 
b.config.ValidateForMaterialization(); err != nil { - return nil, err + return err } - stagePVC := stagingPVCName(artifact + requestedName) - if err := b.ensurePVC(ctx, stagePVC); err != nil { - return nil, err + if action.MountPath == "" { + action.MountPath = "/scroll" } - pullJob := pullJobSpec(b.config.Namespace, jobName("pull", ref(b.config.Namespace, stagePVC), shortHash(artifact)), stagePVC, b.config.PullImage, artifact, b.config.RegistrySecret, b.config.RegistryPlainHTTP) - if err := b.runHelperJob(ctx, pullJob); err != nil { - return nil, err - } - scrollYAML, err := b.ReadScrollFile(ref(b.config.Namespace, stagePVC)) + namespace, pvc, err := parseRef(action.RootRef) if err != nil { - return nil, err + return err } - scroll, err := domain.NewScrollFromBytes("", scrollYAML) - if err != nil { - return nil, err + if action.Mode == ports.RuntimeWorkerModeCreate { + if err := b.ensurePVC(ctx, pvc); err != nil { + return err + } } - id, err := coreservices.RuntimeScrollID(requestedName, scroll.Name) + registryConfigSecret, cleanupRegistryConfig, err := b.createRegistryConfigSecret(ctx, namespace, action.Artifact+action.RuntimeID, action.RegistryCredentials) if err != nil { - return nil, err - } - finalPVC := dataPVCName(id) - if err := b.ensurePVC(ctx, finalPVC); err != nil { - return nil, err - } - copyJob := copyPVCJobSpec(b.config.Namespace, jobName("copy", ref(b.config.Namespace, finalPVC), shortHash(stagePVC)), stagePVC, finalPVC, b.config.HelperImage) - if err := b.runHelperJob(ctx, copyJob); err != nil { - return nil, err + return err } - _ = b.client.CoreV1().PersistentVolumeClaims(b.config.Namespace).Delete(ctx, stagePVC, metav1.DeleteOptions{}) - return &ports.RuntimeMaterialization{ - Artifact: artifact, - ScrollRoot: ref(b.config.Namespace, finalPVC), - DataRoot: ref(b.config.Namespace, finalPVC), - ScrollYAML: scrollYAML, - }, nil + defer cleanupRegistryConfig() + job := workerPullJobSpec(namespace, jobName("worker-pull", action.RootRef, 
shortHash(string(action.Mode)+action.Artifact)), pvc, b.config.PullImage, action, b.config.RegistrySecret, registryConfigSecret, b.config.RegistryPlainHTTP) + return b.runHelperJob(ctx, job) } -func (b *Backend) ReadScrollFile(scrollRoot string) ([]byte, error) { - namespace, pvc, err := parseRef(scrollRoot) +func (b *Backend) ReadScrollFile(root string) ([]byte, error) { + namespace, pvc, err := parseRef(root) if err != nil { return nil, err } - job := readScrollJobSpec(namespace, jobName("read", scrollRoot, "scroll-yaml"), pvc, b.config.HelperImage) + job := readScrollJobSpec(namespace, jobName("read", root, "scroll-yaml"), pvc, b.config.HelperImage) return b.runJobAndLogs(context.Background(), job) } -func (b *Backend) ReadDataFile(ctx context.Context, dataRoot string, relativePath string) ([]byte, error) { - cleaned, err := cleanDataPath(relativePath) - if err != nil { - return nil, err +func (b *Backend) StartDev(ctx context.Context, action ports.RuntimeDevAction) error { + if b.config.PullImage == "" { + return fmt.Errorf("kubernetes dev requires --k8s-pull-image or DRUID_K8S_PULL_IMAGE") } - namespace, pvc, err := parseRef(dataRoot) - if err != nil { - return nil, err + if action.MountPath == "" { + action.MountPath = "/scroll" } - job := readDataFileJobSpec(namespace, jobName("read-file", dataRoot, shortHash(cleaned)), pvc, b.config.HelperImage, cleaned) - return b.runJobAndLogs(ctx, job) -} - -func (b *Backend) WriteDataFile(ctx context.Context, dataRoot string, relativePath string, data []byte) error { - cleaned, err := cleanDataPath(relativePath) + if action.Listen == "" { + action.Listen = ":8084" + } + namespace, pvc, err := parseRef(action.RootRef) if err != nil { return err } - namespace, pvc, err := parseRef(dataRoot) + sts := devStatefulSetSpec(namespace, action.RootRef, pvc, b.config.PullImage, action, b.config.RegistrySecret) + existing, err := b.client.AppsV1().StatefulSets(namespace).Get(ctx, sts.Name, metav1.GetOptions{}) + switch { + case 
apierrors.IsNotFound(err): + if _, err := b.client.AppsV1().StatefulSets(namespace).Create(ctx, sts, metav1.CreateOptions{}); err != nil { + return err + } + case err != nil: + return err + default: + sts.ResourceVersion = existing.ResourceVersion + if _, err := b.client.AppsV1().StatefulSets(namespace).Update(ctx, sts, metav1.UpdateOptions{}); err != nil { + return err + } + } + service := devServiceSpec(namespace, action.RootRef, pvc) + if err := b.reconcileService(ctx, service); err != nil { + return err + } + return b.waitForStatefulSet(ctx, sts.Name) +} + +func (b *Backend) StopDev(ctx context.Context, root string) error { + namespace, _, err := parseRef(root) if err != nil { return err } - job := writeDataFileJobSpec(namespace, jobName("write-file", dataRoot, shortHash(cleaned)), pvc, b.config.HelperImage, cleaned, base64.StdEncoding.EncodeToString(data)) - return b.runHelperJob(ctx, job) + propagation := metav1.DeletePropagationBackground + _ = b.client.AppsV1().StatefulSets(namespace).Delete(ctx, devStatefulSetName(root), metav1.DeleteOptions{PropagationPolicy: &propagation}) + _ = b.client.CoreV1().Services(namespace).Delete(ctx, serviceName(root, "dev", "webdav"), metav1.DeleteOptions{}) + return nil } -func cleanDataPath(relativePath string) (string, error) { - cleaned := path.Clean(strings.TrimPrefix(relativePath, "/")) - if cleaned == "." || cleaned == ".." 
|| strings.HasPrefix(cleaned, "../") { - return "", fmt.Errorf("invalid data file path %q", relativePath) +func (b *Backend) reconcileService(ctx context.Context, service *corev1.Service) error { + existing, err := b.client.CoreV1().Services(service.Namespace).Get(ctx, service.Name, metav1.GetOptions{}) + switch { + case apierrors.IsNotFound(err): + _, err := b.client.CoreV1().Services(service.Namespace).Create(ctx, service, metav1.CreateOptions{}) + return err + case err != nil: + return err } - return cleaned, nil + service.ResourceVersion = existing.ResourceVersion + service.Spec.ClusterIP = existing.Spec.ClusterIP + service.Spec.ClusterIPs = existing.Spec.ClusterIPs + service.Spec.IPFamilies = existing.Spec.IPFamilies + service.Spec.IPFamilyPolicy = existing.Spec.IPFamilyPolicy + _, err = b.client.CoreV1().Services(service.Namespace).Update(ctx, service, metav1.UpdateOptions{}) + return err } func (b *Backend) RunCommand(command ports.RuntimeCommand) (*int, error) { @@ -224,7 +243,7 @@ func (b *Backend) RunCommand(command ports.RuntimeCommand) (*int, error) { } if command.Command.Run == domain.RunModePersistent { if procedure.IsSignal() { - if err := b.Signal(procedureName, procedure.Target, procedure.Signal, command.DataRoot); err != nil { + if err := b.Signal(procedureName, procedure.Target, procedure.Signal, command.Root); err != nil { return nil, err } continue @@ -232,12 +251,12 @@ func (b *Backend) RunCommand(command ports.RuntimeCommand) (*int, error) { if procedure.Image == "" { return nil, fmt.Errorf("kubernetes procedure %s requires image", procedureName) } - if err := b.ensurePersistentProcedure(context.Background(), command.ScrollID, command.DataRoot, procedureName, procedure, command.GlobalPorts, env); err != nil { + if err := b.ensurePersistentProcedure(context.Background(), command.ScrollID, command.Root, procedureName, procedure, command.GlobalPorts, env); err != nil { return nil, err } continue } - exitCode, err := 
b.runJobProcedure(command.ScrollID, procedureName, procedure, command.DataRoot, command.GlobalPorts, env) + exitCode, err := b.runJobProcedure(command.ScrollID, procedureName, procedure, command.Root, command.GlobalPorts, env) if err != nil { return exitCode, err } @@ -251,18 +270,18 @@ func (b *Backend) RunCommand(command ports.RuntimeCommand) (*int, error) { return nil, nil } -func (b *Backend) runJobProcedure(scrollID string, procedureName string, procedure *domain.Procedure, dataRoot string, globalPorts []domain.Port, env map[string]string) (*int, error) { +func (b *Backend) runJobProcedure(scrollID string, procedureName string, procedure *domain.Procedure, root string, globalPorts []domain.Port, env map[string]string) (*int, error) { if procedure.IsSignal() { - return nil, b.Signal(procedureName, procedure.Target, procedure.Signal, dataRoot) + return nil, b.Signal(procedureName, procedure.Target, procedure.Signal, root) } if procedure.Image == "" { return nil, fmt.Errorf("kubernetes procedure %s requires image", procedureName) } ctx := context.Background() - if err := b.ensureExpectedServices(ctx, dataRoot, procedureName, procedure, globalPorts); err != nil { + if err := b.ensureExpectedServices(ctx, root, procedureName, procedure, globalPorts); err != nil { return nil, err } - job, err := procedureJobSpec(b.config.Namespace, dataRoot, procedureName, procedure, env, b.config.RegistrySecret) + job, err := procedureJobSpec(b.config.Namespace, root, procedureName, procedure, env, b.config.RegistrySecret) if err != nil { return nil, err } @@ -274,7 +293,7 @@ func (b *Backend) runJobProcedure(scrollID string, procedureName string, procedu consoleID := runtimeConsoleID(scrollID, procedureName) console, doneChan := b.consoleManager.AddConsoleWithChannel(consoleID, domain.ConsoleTypeContainer, "stdin", output) console.WriteInput = func(data string) error { - return b.Attach(procedureName, data) + return b.attachToProcedure(root, procedureName, data) } streamStarted := 
false podName, err := b.waitForJobPod(ctx, job.Name, string(createdJob.UID)) @@ -296,11 +315,11 @@ func (b *Backend) runJobProcedure(scrollID string, procedureName string, procedu return exitCode, nil } -func (b *Backend) ensurePersistentProcedure(ctx context.Context, scrollID string, dataRoot string, procedureName string, procedure *domain.Procedure, globalPorts []domain.Port, env map[string]string) error { - if err := b.ensureExpectedServices(ctx, dataRoot, procedureName, procedure, globalPorts); err != nil { +func (b *Backend) ensurePersistentProcedure(ctx context.Context, scrollID string, root string, procedureName string, procedure *domain.Procedure, globalPorts []domain.Port, env map[string]string) error { + if err := b.ensureExpectedServices(ctx, root, procedureName, procedure, globalPorts); err != nil { return err } - statefulSet, err := procedureStatefulSetSpec(b.config.Namespace, dataRoot, procedureName, procedure, env, b.config.RegistrySecret) + statefulSet, err := procedureStatefulSetSpec(b.config.Namespace, root, procedureName, procedure, env, b.config.RegistrySecret) if err != nil { return err } @@ -321,7 +340,7 @@ func (b *Backend) ensurePersistentProcedure(ctx context.Context, scrollID string output := make(chan string, 100) console, _ := b.consoleManager.AddConsoleWithChannel(runtimeConsoleID(scrollID, procedureName), domain.ConsoleTypeContainer, "stdin", output) console.WriteInput = func(data string) error { - return b.Attach(procedureName, data) + return b.attachToProcedure(root, procedureName, data) } if err := b.waitForStatefulSet(ctx, statefulSet.Name); err != nil { close(output) @@ -342,8 +361,8 @@ func (b *Backend) ensurePersistentProcedure(ctx context.Context, scrollID string return nil } -func (b *Backend) ExpectedPorts(dataRoot string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimePortStatus, error) { - _, pvc, err := parseRef(dataRoot) +func (b *Backend) ExpectedPorts(root string, commands 
map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimePortStatus, error) { + _, pvc, err := parseRef(root) if err != nil { return nil, err } @@ -379,7 +398,7 @@ func (b *Backend) ExpectedPorts(dataRoot string, commands map[string]*domain.Com KeepAliveTraffic: expectedPort.KeepAliveTraffic, Source: "kubernetes-service", } - serviceReady, hostPort := b.serviceReady(context.Background(), serviceName(dataRoot, procedureName, expectedPort.Name)) + serviceReady, hostPort := b.serviceReady(context.Background(), serviceName(root, procedureName, expectedPort.Name)) status.Bound = serviceReady status.HostPort = hostPort if !hubbleAvailable { @@ -423,14 +442,36 @@ func (b *Backend) ExpectedPorts(dataRoot string, commands map[string]*domain.Com return statuses, nil } -func (b *Backend) RoutingTargets(dataRoot string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimeRoutingTarget, error) { - namespace, pvc, err := parseRef(dataRoot) +func (b *Backend) RoutingTargets(root string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimeRoutingTarget, error) { + namespace, pvc, err := parseRef(root) if err != nil { return nil, err } portsByName := portsByName(globalPorts) - targets := []domain.RuntimeRoutingTarget{} - for commandName, command := range commands { + targets := []domain.RuntimeRoutingTarget{{ + Name: "webdav", + Procedure: "dev", + PortName: "webdav", + Port: 8084, + Protocol: "https", + Namespace: namespace, + ServiceName: serviceName(root, "dev", "webdav"), + ServicePort: 8084, + Selector: map[string]string{ + labelManagedBy: "druid", + labelComponent: "runtime", + labelScrollID: dnsLabel(pvc), + labelProcedure: "dev", + }, + }} + seen := map[string]struct{}{"webdav": {}} + commandNames := make([]string, 0, len(commands)) + for commandName := range commands { + commandNames = append(commandNames, commandName) + } + sort.Strings(commandNames) + for _, 
commandName := range commandNames { + command := commands[commandName] if command == nil { continue } @@ -440,25 +481,34 @@ func (b *Backend) RoutingTargets(dataRoot string, commands map[string]*domain.Co } procedureName := domain.ProcedureName(commandName, idx, procedure) for _, expectedPort := range procedure.ExpectedPorts { + if _, ok := seen[expectedPort.Name]; ok { + continue + } port, ok := portsByName[expectedPort.Name] if !ok { return nil, fmt.Errorf("expected port %s is not defined in top-level ports", expectedPort.Name) } + seen[expectedPort.Name] = struct{}{} + selector := map[string]string{ + labelManagedBy: "druid", + labelComponent: "runtime", + labelScrollID: dnsLabel(pvc), + } + if len(procedure.ExpectedPorts) == 1 { + selector[labelPortName] = dnsLabel(expectedPort.Name) + } else { + selector[labelProcedure] = dnsLabel(procedureName) + } targets = append(targets, domain.RuntimeRoutingTarget{ - Name: fmt.Sprintf("%s-%s", procedureName, expectedPort.Name), + Name: expectedPort.Name, Procedure: procedureName, PortName: expectedPort.Name, Port: port.Port, Protocol: normalizeProtocol(port.Protocol), Namespace: namespace, - ServiceName: serviceName(dataRoot, procedureName, expectedPort.Name), + ServiceName: serviceName(root, procedureName, expectedPort.Name), ServicePort: port.Port, - Selector: map[string]string{ - labelManagedBy: "druid", - labelComponent: "runtime", - labelScrollID: dnsLabel(pvc), - labelProcedure: dnsLabel(procedureName), - }, + Selector: selector, }) } } @@ -466,29 +516,29 @@ func (b *Backend) RoutingTargets(dataRoot string, commands map[string]*domain.Co return targets, nil } -func (b *Backend) StopRuntime(dataRoot string) error { +func (b *Backend) StopRuntime(root string) error { propagation := metav1.DeletePropagationBackground options := metav1.DeleteOptions{PropagationPolicy: &propagation} - if err := b.deleteRuntimeJobs(context.Background(), dataRoot, options); err != nil { + if err := b.deleteRuntimeJobs(context.Background(), 
root, options); err != nil { return err } - if err := b.deleteRuntimeStatefulSets(context.Background(), dataRoot, options); err != nil { + if err := b.deleteRuntimeStatefulSets(context.Background(), root, options); err != nil { return err } - return b.deleteRuntimePodsByScroll(context.Background(), dataRoot, options) + return b.deleteRuntimePodsByScroll(context.Background(), root, options) } -func (b *Backend) DeleteRuntime(dataRoot string, purgeData bool) error { +func (b *Backend) DeleteRuntime(root string, purgeData bool) error { propagation := metav1.DeletePropagationBackground options := metav1.DeleteOptions{PropagationPolicy: &propagation} - if err := b.StopRuntime(dataRoot); err != nil { + if err := b.StopRuntime(root); err != nil { return err } - if err := b.deleteRuntimeServices(context.Background(), dataRoot, options); err != nil { + if err := b.deleteRuntimeServices(context.Background(), root, options); err != nil { return err } if purgeData { - namespace, pvc, err := parseRef(dataRoot) + namespace, pvc, err := parseRef(root) if err != nil { return err } @@ -500,79 +550,136 @@ func (b *Backend) DeleteRuntime(dataRoot string, purgeData bool) error { return nil } -func (b *Backend) BackupRuntime(ctx context.Context, dataRoot string, artifact string) error { +func (b *Backend) BackupRuntime(ctx context.Context, root string, artifact string, registryCredentials []domain.RegistryCredential) error { if artifact == "" { return fmt.Errorf("backup artifact is required") } if b.config.PullImage == "" { return b.config.ValidateForMaterialization() } - namespace, pvc, err := parseRef(dataRoot) + namespace, pvc, err := parseRef(root) + if err != nil { + return err + } + registryConfigSecret, cleanupRegistryConfig, err := b.createRegistryConfigSecret(ctx, namespace, artifact+root, registryCredentials) if err != nil { return err } - job := backupJobSpec(namespace, jobName("backup", dataRoot, shortHash(artifact)), pvc, b.config.PullImage, artifact, 
b.config.RegistrySecret, b.config.RegistryPlainHTTP) + defer cleanupRegistryConfig() + job := backupJobSpec(namespace, jobName("backup", root, shortHash(artifact)), pvc, b.config.PullImage, artifact, b.config.RegistrySecret, registryConfigSecret, b.config.RegistryPlainHTTP) return b.runHelperJob(ctx, job) } -func (b *Backend) RestoreRuntime(ctx context.Context, dataRoot string, artifact string) error { +func (b *Backend) RestoreRuntime(ctx context.Context, root string, artifact string, registryCredentials []domain.RegistryCredential) error { if artifact == "" { return fmt.Errorf("restore artifact is required") } if err := b.config.ValidateForMaterialization(); err != nil { return err } - namespace, pvc, err := parseRef(dataRoot) + namespace, pvc, err := parseRef(root) if err != nil { return err } - stagePVC := stagingPVCName("restore:" + dataRoot + ":" + artifact) + stagePVC := stagingPVCName("restore:" + root + ":" + artifact) if err := b.ensurePVC(ctx, stagePVC); err != nil { return err } defer b.client.CoreV1().PersistentVolumeClaims(namespace).Delete(context.Background(), stagePVC, metav1.DeleteOptions{}) - pullJob := pullJobSpec(namespace, jobName("restore-pull", ref(namespace, stagePVC), shortHash(artifact)), stagePVC, b.config.PullImage, artifact, b.config.RegistrySecret, b.config.RegistryPlainHTTP) + registryConfigSecret, cleanupRegistryConfig, err := b.createRegistryConfigSecret(ctx, namespace, artifact+root, registryCredentials) + if err != nil { + return err + } + defer cleanupRegistryConfig() + pullJob := pullJobSpec(namespace, jobName("restore-pull", ref(namespace, stagePVC), shortHash(artifact)), stagePVC, b.config.PullImage, artifact, b.config.RegistrySecret, registryConfigSecret, b.config.RegistryPlainHTTP) if err := b.runHelperJob(ctx, pullJob); err != nil { return err } - if err := b.StopRuntime(dataRoot); err != nil { + if err := b.StopRuntime(root); err != nil { return err } - restoreJob := replacePVCJobSpec(namespace, jobName("restore-copy", 
dataRoot, shortHash(artifact)), stagePVC, pvc, b.config.HelperImage) + restoreJob := replacePVCJobSpec(namespace, jobName("restore-copy", root, shortHash(artifact)), stagePVC, pvc, b.config.HelperImage) return b.runHelperJob(ctx, restoreJob) } func (b *Backend) Attach(commandName string, data string) error { - return fmt.Errorf("kubernetes attach is not implemented for console %s: pod attach/exec support is required", commandName) + pods, err := b.client.CoreV1().Pods(b.config.Namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(labels.Set{labelProcedure: dnsLabel(commandName)}).String(), + }) + if err != nil { + return err + } + for _, pod := range pods.Items { + if pod.Status.Phase == corev1.PodRunning { + return b.attachToPod(context.Background(), pod.Name, data) + } + } + return fmt.Errorf("no running pod found for console %s", commandName) +} + +func (b *Backend) attachToProcedure(root string, procedureName string, data string) error { + _, pvc, err := parseRef(root) + if err != nil { + return err + } + selector := baseLabels(pvc) + selector[labelProcedure] = dnsLabel(procedureName) + podName, err := b.waitForPodBySelector(context.Background(), labels.SelectorFromSet(selector).String()) + if err != nil { + return err + } + return b.attachToPod(context.Background(), podName, data) } -func (b *Backend) Signal(_ string, target string, signal string, dataRoot string) error { +func (b *Backend) attachToPod(ctx context.Context, podName string, data string) error { + req := b.client.CoreV1().RESTClient().Post(). + Resource("pods"). + Namespace(b.config.Namespace). + Name(podName). + SubResource("attach"). 
+ VersionedParams(&corev1.PodAttachOptions{ + Container: "main", + Stdin: true, + Stdout: false, + Stderr: false, + TTY: false, + }, k8sscheme.ParameterCodec) + exec, err := remotecommand.NewSPDYExecutor(b.restConfig, "POST", req.URL()) + if err != nil { + return err + } + return exec.StreamWithContext(ctx, remotecommand.StreamOptions{ + Stdin: strings.NewReader(data), + }) +} + +func (b *Backend) Signal(_ string, target string, signal string, root string) error { if target == "" { return nil } switch signal { case "", "SIGTERM", "TERM": propagation := metav1.DeletePropagationBackground - return b.deleteRuntimeWorkload(context.Background(), dataRoot, target, metav1.DeleteOptions{PropagationPolicy: &propagation}) + return b.deleteRuntimeWorkload(context.Background(), root, target, metav1.DeleteOptions{PropagationPolicy: &propagation}) case "SIGKILL", "KILL": grace := int64(0) propagation := metav1.DeletePropagationBackground - return b.deleteRuntimeWorkload(context.Background(), dataRoot, target, metav1.DeleteOptions{GracePeriodSeconds: &grace, PropagationPolicy: &propagation}) + return b.deleteRuntimeWorkload(context.Background(), root, target, metav1.DeleteOptions{GracePeriodSeconds: &grace, PropagationPolicy: &propagation}) default: return fmt.Errorf("kubernetes signal %s is unsupported without pod exec", signal) } } -func (b *Backend) deleteRuntimeWorkload(ctx context.Context, dataRoot string, target string, options metav1.DeleteOptions) error { - jobErr := b.client.BatchV1().Jobs(b.config.Namespace).Delete(ctx, jobName("proc", dataRoot, target), options) +func (b *Backend) deleteRuntimeWorkload(ctx context.Context, root string, target string, options metav1.DeleteOptions) error { + jobErr := b.client.BatchV1().Jobs(b.config.Namespace).Delete(ctx, jobName("proc", root, target), options) if apierrors.IsNotFound(jobErr) { jobErr = nil } - statefulSetErr := b.client.AppsV1().StatefulSets(b.config.Namespace).Delete(ctx, statefulSetName(dataRoot, target), options) + 
statefulSetErr := b.client.AppsV1().StatefulSets(b.config.Namespace).Delete(ctx, statefulSetName(root, target), options) if apierrors.IsNotFound(statefulSetErr) { statefulSetErr = nil } - podErr := b.deleteRuntimePods(ctx, dataRoot, target, options) + podErr := b.deleteRuntimePods(ctx, root, target, options) if jobErr != nil { return jobErr } @@ -582,8 +689,8 @@ func (b *Backend) deleteRuntimeWorkload(ctx context.Context, dataRoot string, ta return podErr } -func (b *Backend) deleteRuntimePods(ctx context.Context, dataRoot string, target string, options metav1.DeleteOptions) error { - _, pvc, err := parseRef(dataRoot) +func (b *Backend) deleteRuntimePods(ctx context.Context, root string, target string, options metav1.DeleteOptions) error { + _, pvc, err := parseRef(root) if err != nil { return err } @@ -603,8 +710,8 @@ func (b *Backend) deleteRuntimePods(ctx context.Context, dataRoot string, target return nil } -func (b *Backend) deleteRuntimeJobs(ctx context.Context, dataRoot string, options metav1.DeleteOptions) error { - return b.deleteRuntimeObjects(ctx, dataRoot, func(name string) error { +func (b *Backend) deleteRuntimeJobs(ctx context.Context, root string, options metav1.DeleteOptions) error { + return b.deleteRuntimeObjects(ctx, root, func(name string) error { err := b.client.BatchV1().Jobs(b.config.Namespace).Delete(ctx, name, options) if apierrors.IsNotFound(err) { return nil @@ -613,8 +720,8 @@ func (b *Backend) deleteRuntimeJobs(ctx context.Context, dataRoot string, option }, "jobs") } -func (b *Backend) deleteRuntimeStatefulSets(ctx context.Context, dataRoot string, options metav1.DeleteOptions) error { - return b.deleteRuntimeObjects(ctx, dataRoot, func(name string) error { +func (b *Backend) deleteRuntimeStatefulSets(ctx context.Context, root string, options metav1.DeleteOptions) error { + return b.deleteRuntimeObjects(ctx, root, func(name string) error { err := b.client.AppsV1().StatefulSets(b.config.Namespace).Delete(ctx, name, options) if 
apierrors.IsNotFound(err) { return nil @@ -623,8 +730,8 @@ func (b *Backend) deleteRuntimeStatefulSets(ctx context.Context, dataRoot string }, "statefulsets") } -func (b *Backend) deleteRuntimeServices(ctx context.Context, dataRoot string, options metav1.DeleteOptions) error { - return b.deleteRuntimeObjects(ctx, dataRoot, func(name string) error { +func (b *Backend) deleteRuntimeServices(ctx context.Context, root string, options metav1.DeleteOptions) error { + return b.deleteRuntimeObjects(ctx, root, func(name string) error { err := b.client.CoreV1().Services(b.config.Namespace).Delete(ctx, name, options) if apierrors.IsNotFound(err) { return nil @@ -633,8 +740,8 @@ func (b *Backend) deleteRuntimeServices(ctx context.Context, dataRoot string, op }, "services") } -func (b *Backend) deleteRuntimeObjects(ctx context.Context, dataRoot string, deleteOne func(name string) error, kind string) error { - _, pvc, err := parseRef(dataRoot) +func (b *Backend) deleteRuntimeObjects(ctx context.Context, root string, deleteOne func(name string) error, kind string) error { + _, pvc, err := parseRef(root) if err != nil { return err } @@ -676,8 +783,8 @@ func (b *Backend) deleteRuntimeObjects(ctx context.Context, dataRoot string, del return nil } -func (b *Backend) deleteRuntimePodsByScroll(ctx context.Context, dataRoot string, options metav1.DeleteOptions) error { - _, pvc, err := parseRef(dataRoot) +func (b *Backend) deleteRuntimePodsByScroll(ctx context.Context, root string, options metav1.DeleteOptions) error { + _, pvc, err := parseRef(root) if err != nil { return err } @@ -705,12 +812,49 @@ func (b *Backend) ensurePVC(ctx context.Context, name string) error { return err } +func (b *Backend) createRegistryConfigSecret(ctx context.Context, namespace string, seed string, credentials []domain.RegistryCredential) (string, func(), error) { + if len(credentials) == 0 { + return "", func() {}, nil + } + data, err := json.Marshal(struct { + Registries []domain.RegistryCredential 
`json:"registries"` + }{Registries: credentials}) + if err != nil { + return "", nil, err + } + name := dnsLabel("druid-registry-" + shortHash(fmt.Sprintf("%s-%d", seed, time.Now().UnixNano()))) + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + labelManagedBy: "druid", + labelComponent: "registry-auth", + }, + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{registryConfigSecretKey: data}, + } + if _, err := b.client.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}); err != nil { + return "", nil, err + } + cleanup := func() { + deleteCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + _ = b.client.CoreV1().Secrets(namespace).Delete(deleteCtx, name, metav1.DeleteOptions{}) + } + return name, cleanup, nil +} + func (b *Backend) runHelperJob(ctx context.Context, job *batchv1.Job) error { _, err := b.runJobAndLogs(ctx, job) return err } func (b *Backend) runJobAndLogs(ctx context.Context, job *batchv1.Job) ([]byte, error) { + if b.jobLogRunner != nil { + return b.jobLogRunner(ctx, job) + } createdJob, err := b.createFreshJob(ctx, job) if err != nil { return nil, err @@ -894,14 +1038,14 @@ func (b *Backend) streamPodLogs(ctx context.Context, podName string, output chan } } -func (b *Backend) ensureExpectedServices(ctx context.Context, dataRoot string, procedureName string, procedure *domain.Procedure, globalPorts []domain.Port) error { +func (b *Backend) ensureExpectedServices(ctx context.Context, root string, procedureName string, procedure *domain.Procedure, globalPorts []domain.Port) error { ports := portsByName(globalPorts) for _, expected := range procedure.ExpectedPorts { port, ok := ports[expected.Name] if !ok { return fmt.Errorf("expected port %s is not defined in top-level ports", expected.Name) } - service, err := serviceSpec(b.config.Namespace, dataRoot, procedureName, expected.Name, port) + service, err 
:= serviceSpec(b.config.Namespace, root, procedureName, expected.Name, port) if err != nil { return err } diff --git a/internal/runtime/kubernetes/hubble.go b/internal/runtime/kubernetes/hubble.go index 85bcb8a4..24d2ba86 100644 --- a/internal/runtime/kubernetes/hubble.go +++ b/internal/runtime/kubernetes/hubble.go @@ -73,19 +73,14 @@ func (c *HubbleRelayClient) HasFlow(ctx context.Context, query TrafficQuery) (bo if err != nil { return false, err } - for { - _, err := stream.Recv() - if err == nil { - return true, nil - } - if ctx.Err() != nil { - return false, nil - } - if errors.Is(err, io.EOF) { - return false, nil - } - return false, err + _, err = stream.Recv() + if err == nil { + return true, nil + } + if ctx.Err() != nil || errors.Is(err, io.EOF) { + return false, nil } + return false, err } func normalizeProtocol(protocol string) string { diff --git a/internal/runtime/kubernetes/names.go b/internal/runtime/kubernetes/names.go index a6491d09..083cde66 100644 --- a/internal/runtime/kubernetes/names.go +++ b/internal/runtime/kubernetes/names.go @@ -47,16 +47,20 @@ func stagingPVCName(artifact string) string { return dnsLabel("druid-stage-" + shortHash(artifact)) } -func jobName(prefix string, dataRoot string, procedureName string) string { - return dnsLabel(fmt.Sprintf("druid-%s-%s-%s", prefix, refPVCName(dataRoot), procedureName)) +func jobName(prefix string, root string, procedureName string) string { + return dnsLabel(fmt.Sprintf("druid-%s-%s-%s", prefix, refPVCName(root), procedureName)) } -func statefulSetName(dataRoot string, procedureName string) string { - return dnsLabel(fmt.Sprintf("druid-sts-%s-%s", refPVCName(dataRoot), procedureName)) +func statefulSetName(root string, procedureName string) string { + return dnsLabel(fmt.Sprintf("druid-sts-%s-%s", refPVCName(root), procedureName)) } -func serviceName(dataRoot string, procedureName string, portName string) string { - return dnsLabel(fmt.Sprintf("druid-%s-%s-%s", refPVCName(dataRoot), 
procedureName, portName)) +func devStatefulSetName(root string) string { + return dnsLabel(fmt.Sprintf("druid-dev-%s", refPVCName(root))) +} + +func serviceName(root string, procedureName string, portName string) string { + return dnsLabel(fmt.Sprintf("druid-%s-%s-%s", refPVCName(root), procedureName, portName)) } func ref(namespace string, pvc string) string { @@ -87,7 +91,11 @@ func mountSubPath(mountSubPath string) string { if mountSubPath == "" { return "data" } - return path.Join("data", mountSubPath) + clean := path.Clean(strings.TrimPrefix(mountSubPath, "/")) + if clean == "." || clean == "data" || strings.HasPrefix(clean, "data/") { + return clean + } + return path.Join("data", clean) } func baseLabels(scrollID string) map[string]string { diff --git a/internal/runtime/kubernetes/resources.go b/internal/runtime/kubernetes/resources.go index 4a430d2d..8341e6dd 100644 --- a/internal/runtime/kubernetes/resources.go +++ b/internal/runtime/kubernetes/resources.go @@ -1,7 +1,6 @@ package kubernetes import ( - "path" "path/filepath" "sort" @@ -13,6 +12,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/ports" ) func pvcSpec(namespace string, name string, storageClass string) *corev1.PersistentVolumeClaim { @@ -36,22 +36,84 @@ func pvcSpec(namespace string, name string, storageClass string) *corev1.Persist } } -func pullJobSpec(namespace string, jobName string, pvc string, image string, artifact string, registrySecret string, registryPlainHTTP bool) *batchv1.Job { - command := []string{"druid-client", "pull", artifact, "/scroll"} - job := helperJobSpec(namespace, jobName, pvc, image, command, registrySecret, map[string]string{ +const ( + registryConfigEnvName = "DRUID_RUNTIME_REGISTRY_CONFIG_JSON" + registryConfigSecretKey = "config.json" + registryConfigScript = `printf '%s' "$DRUID_RUNTIME_REGISTRY_CONFIG_JSON" > /tmp/druid-registry.json && exec druid --config 
/tmp/druid-registry.json "$@"` +) + +func workerPullJobSpec(namespace string, jobName string, pvc string, image string, action ports.RuntimeWorkerAction, imagePullSecret string, registryConfigSecret string, registryPlainHTTP bool) *batchv1.Job { + command := []string{ + "druid", "worker", "pull", + "--artifact", action.Artifact, + "--runtime-id", action.RuntimeID, + "--mode", string(action.Mode), + "--root", action.MountPath, + "--callback-url", action.CallbackURL, + } + if registryConfigSecret != "" { + command = append([]string{"sh", "-c", registryConfigScript, "sh"}, command[1:]...) + } + job := helperJobSpec(namespace, jobName, pvc, image, command, imagePullSecret, map[string]string{ + labelComponent: "worker-pull", + }) + container := &job.Spec.Template.Spec.Containers[0] + container.Env = append(container.Env, corev1.EnvVar{Name: "DRUID_WORKER_TOKEN", Value: action.CallbackToken}) + if registryConfigSecret != "" { + container.Env = append(container.Env, corev1.EnvVar{ + Name: registryConfigEnvName, + ValueFrom: &corev1.EnvVarSource{SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: registryConfigSecret}, + Key: registryConfigSecretKey, + }}, + }) + } + if registryPlainHTTP { + container.Env = append(container.Env, corev1.EnvVar{Name: "DRUID_REGISTRY_PLAIN_HTTP", Value: "true"}) + } + return job +} + +func pullJobSpec(namespace string, jobName string, pvc string, image string, artifact string, imagePullSecret string, registryConfigSecret string, registryPlainHTTP bool) *batchv1.Job { + command := []string{"druid", "pull", artifact, "/scroll"} + if registryConfigSecret != "" { + command = []string{"sh", "-c", registryConfigScript, "sh", "pull", artifact, "/scroll"} + } + job := helperJobSpec(namespace, jobName, pvc, image, command, imagePullSecret, map[string]string{ labelComponent: "materializer", }) + if registryConfigSecret != "" { + job.Spec.Template.Spec.Containers[0].Env = 
append(job.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{ + Name: registryConfigEnvName, + ValueFrom: &corev1.EnvVarSource{SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: registryConfigSecret}, + Key: registryConfigSecretKey, + }}, + }) + } if registryPlainHTTP { job.Spec.Template.Spec.Containers[0].Env = append(job.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{Name: "DRUID_REGISTRY_PLAIN_HTTP", Value: "true"}) } return job } -func backupJobSpec(namespace string, jobName string, pvc string, image string, artifact string, registrySecret string, registryPlainHTTP bool) *batchv1.Job { - command := []string{"druid-client", "push", artifact, "/scroll"} - job := helperJobSpec(namespace, jobName, pvc, image, command, registrySecret, map[string]string{ +func backupJobSpec(namespace string, jobName string, pvc string, image string, artifact string, imagePullSecret string, registryConfigSecret string, registryPlainHTTP bool) *batchv1.Job { + command := []string{"druid", "push", artifact, "/scroll"} + if registryConfigSecret != "" { + command = []string{"sh", "-c", registryConfigScript, "sh", "push", artifact, "/scroll"} + } + job := helperJobSpec(namespace, jobName, pvc, image, command, imagePullSecret, map[string]string{ labelComponent: "backup", }) + if registryConfigSecret != "" { + job.Spec.Template.Spec.Containers[0].Env = append(job.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{ + Name: registryConfigEnvName, + ValueFrom: &corev1.EnvVarSource{SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: registryConfigSecret}, + Key: registryConfigSecretKey, + }}, + }) + } if registryPlainHTTP { job.Spec.Template.Spec.Containers[0].Env = append(job.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{Name: "DRUID_REGISTRY_PLAIN_HTTP", Value: "true"}) } @@ -64,29 +126,6 @@ func readScrollJobSpec(namespace string, jobName string, pvc string, helperImage }) } -func 
readDataFileJobSpec(namespace string, jobName string, pvc string, helperImage string, relativePath string) *batchv1.Job { - return helperJobSpec(namespace, jobName, pvc, helperImage, []string{"cat", path.Join("/scroll", relativePath)}, "", map[string]string{ - labelComponent: "read-data-file", - }) -} - -func writeDataFileJobSpec(namespace string, jobName string, pvc string, helperImage string, relativePath string, encodedData string) *batchv1.Job { - job := helperJobSpec(namespace, jobName, pvc, helperImage, []string{ - "sh", - "-c", - `mkdir -p "$(dirname "$1")" && printf '%s' "$DRUID_DATA_FILE_B64" | base64 -d > "$1"`, - "sh", - path.Join("/scroll", relativePath), - }, "", map[string]string{ - labelComponent: "write-data-file", - }) - job.Spec.Template.Spec.Containers[0].Env = append(job.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{ - Name: "DRUID_DATA_FILE_B64", - Value: encodedData, - }) - return job -} - func copyPVCJobSpec(namespace string, jobName string, sourcePVC string, targetPVC string, helperImage string) *batchv1.Job { labels := map[string]string{ labelManagedBy: "druid", @@ -161,14 +200,17 @@ func helperJobSpec(namespace string, jobName string, pvc string, image string, c } } -func procedureJobSpec(namespace string, dataRoot string, procedureName string, procedure *domain.Procedure, env map[string]string, registrySecret string) (*batchv1.Job, error) { - _, pvc, err := parseRef(dataRoot) +func procedureJobSpec(namespace string, root string, procedureName string, procedure *domain.Procedure, env map[string]string, registrySecret string) (*batchv1.Job, error) { + _, pvc, err := parseRef(root) if err != nil { return nil, err } labels := baseLabels(pvc) labels[labelProcedure] = dnsLabel(procedureName) labels[labelCommand] = dnsLabel(procedureName) + if len(procedure.ExpectedPorts) == 1 { + labels[labelPortName] = dnsLabel(procedure.ExpectedPorts[0].Name) + } backoff := int32(0) container := corev1.Container{ Name: "main", @@ -176,7 +218,7 @@ func 
procedureJobSpec(namespace string, dataRoot string, procedureName string, p Command: procedure.Command, WorkingDir: procedure.WorkingDir, TTY: procedure.TTY, - Stdin: procedure.TTY, + Stdin: true, ImagePullPolicy: corev1.PullIfNotPresent, Env: envVars(env), VolumeMounts: volumeMounts(procedure.Mounts), @@ -191,7 +233,7 @@ func procedureJobSpec(namespace string, dataRoot string, procedureName string, p } return &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ - Name: jobName("proc", dataRoot, procedureName), + Name: jobName("proc", root, procedureName), Namespace: namespace, Labels: labels, }, @@ -205,14 +247,17 @@ func procedureJobSpec(namespace string, dataRoot string, procedureName string, p }, nil } -func procedureStatefulSetSpec(namespace string, dataRoot string, procedureName string, procedure *domain.Procedure, env map[string]string, registrySecret string) (*appsv1.StatefulSet, error) { - _, pvc, err := parseRef(dataRoot) +func procedureStatefulSetSpec(namespace string, root string, procedureName string, procedure *domain.Procedure, env map[string]string, registrySecret string) (*appsv1.StatefulSet, error) { + _, pvc, err := parseRef(root) if err != nil { return nil, err } labels := baseLabels(pvc) labels[labelProcedure] = dnsLabel(procedureName) labels[labelCommand] = dnsLabel(procedureName) + if len(procedure.ExpectedPorts) == 1 { + labels[labelPortName] = dnsLabel(procedure.ExpectedPorts[0].Name) + } replicas := int32(1) container := corev1.Container{ Name: "main", @@ -220,7 +265,7 @@ func procedureStatefulSetSpec(namespace string, dataRoot string, procedureName s Command: procedure.Command, WorkingDir: procedure.WorkingDir, TTY: procedure.TTY, - Stdin: procedure.TTY, + Stdin: true, ImagePullPolicy: corev1.PullIfNotPresent, Env: envVars(env), VolumeMounts: volumeMounts(procedure.Mounts), @@ -234,13 +279,13 @@ func procedureStatefulSetSpec(namespace string, dataRoot string, procedureName s } return &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: 
statefulSetName(dataRoot, procedureName), + Name: statefulSetName(root, procedureName), Namespace: namespace, Labels: labels, }, Spec: appsv1.StatefulSetSpec{ Replicas: &replicas, - ServiceName: statefulSetName(dataRoot, procedureName), + ServiceName: statefulSetName(root, procedureName), Selector: &metav1.LabelSelector{MatchLabels: labels}, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{Labels: labels}, @@ -250,8 +295,78 @@ func procedureStatefulSetSpec(namespace string, dataRoot string, procedureName s }, nil } -func serviceSpec(namespace string, dataRoot string, procedureName string, portName string, port domain.Port) (*corev1.Service, error) { - _, pvc, err := parseRef(dataRoot) +func devStatefulSetSpec(namespace string, root string, pvc string, image string, action ports.RuntimeDevAction, registrySecret string) *appsv1.StatefulSet { + labels := baseLabels(pvc) + labels[labelProcedure] = "dev" + replicas := int32(1) + args := []string{"dev", "--root", action.MountPath, "--listen", action.Listen, "--runtime-id", action.RuntimeID, "--daemon-url", action.DaemonURL} + if action.DaemonToken != "" { + args = append(args, "--daemon-token", action.DaemonToken) + } + if action.OwnerID != "" { + args = append(args, "--owner-id", action.OwnerID) + } + if action.AuthJWKSURL != "" { + args = append(args, "--auth-jwks-url", action.AuthJWKSURL) + } + if action.RuntimeJWKSURL != "" { + args = append(args, "--runtime-jwks-url", action.RuntimeJWKSURL) + } + for _, watchPath := range action.WatchPaths { + args = append(args, "--watch", watchPath) + } + for _, command := range action.HotReloadCommands { + args = append(args, "--command", command) + } + podSpec := corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: image, + Command: []string{"druid"}, + Args: args, + ImagePullPolicy: corev1.PullIfNotPresent, + Ports: []corev1.ContainerPort{{Name: "webdav", ContainerPort: 8084}}, + VolumeMounts: []corev1.VolumeMount{{Name: "data", MountPath: 
action.MountPath}}, + }}, + Volumes: []corev1.Volume{pvcVolume("data", pvc)}, + } + if registrySecret != "" { + podSpec.ImagePullSecrets = []corev1.LocalObjectReference{{Name: registrySecret}} + } + return &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{Name: devStatefulSetName(root), Namespace: namespace, Labels: labels}, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas, + ServiceName: devStatefulSetName(root), + Selector: &metav1.LabelSelector{MatchLabels: labels}, + Template: corev1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Labels: labels}, Spec: podSpec}, + }, + } +} + +func devServiceSpec(namespace string, root string, pvc string) *corev1.Service { + labels := baseLabels(pvc) + labels[labelProcedure] = "dev" + labels[labelPortName] = "webdav" + selector := baseLabels(pvc) + selector[labelProcedure] = "dev" + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: serviceName(root, "dev", "webdav"), Namespace: namespace, Labels: labels}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: selector, + Ports: []corev1.ServicePort{{ + Name: "webdav", + Protocol: corev1.ProtocolTCP, + Port: 8084, + TargetPort: intstr.FromInt(8084), + }}, + }, + } +} + +func serviceSpec(namespace string, root string, procedureName string, portName string, port domain.Port) (*corev1.Service, error) { + _, pvc, err := parseRef(root) if err != nil { return nil, err } @@ -266,7 +381,7 @@ func serviceSpec(namespace string, dataRoot string, procedureName string, portNa } return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: serviceName(dataRoot, procedureName, portName), + Name: serviceName(root, procedureName, portName), Namespace: namespace, Labels: labels, }, diff --git a/internal/runtime/kubernetes/resources_test.go b/internal/runtime/kubernetes/resources_test.go index 785849af..61dba1c0 100644 --- a/internal/runtime/kubernetes/resources_test.go +++ b/internal/runtime/kubernetes/resources_test.go @@ -2,6 +2,7 @@ package kubernetes 
import ( "context" + "encoding/json" "errors" "strings" "testing" @@ -15,6 +16,7 @@ import ( "k8s.io/client-go/kubernetes/fake" "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/core/ports" coreservices "github.com/highcard-dev/daemon/internal/core/services" ) @@ -104,34 +106,6 @@ func TestProcedureStatefulSetSpecUsesProvidedRuntimeEnv(t *testing.T) { } } -func TestReadDataFileJobSpecScopesPathToScrollPVC(t *testing.T) { - job := readDataFileJobSpec("druid", "read-file", "druid-scroll-data", "alpine:3.20", "data/private/dist/app.wasm") - container := job.Spec.Template.Spec.Containers[0] - if job.Namespace != "druid" || job.Labels[labelComponent] != "read-data-file" { - t.Fatalf("unexpected job metadata: namespace=%s labels=%#v", job.Namespace, job.Labels) - } - if got := container.Command; len(got) != 2 || got[0] != "cat" || got[1] != "/scroll/data/private/dist/app.wasm" { - t.Fatalf("unexpected command: %#v", got) - } - if got := job.Spec.Template.Spec.Volumes[0].PersistentVolumeClaim.ClaimName; got != "druid-scroll-data" { - t.Fatalf("claim = %s, want druid-scroll-data", got) - } -} - -func TestWriteDataFileJobSpecScopesPathToScrollPVC(t *testing.T) { - job := writeDataFileJobSpec("druid", "write-file", "druid-scroll-data", "alpine:3.20", "data/private/config.json", "e30=") - container := job.Spec.Template.Spec.Containers[0] - if job.Namespace != "druid" || job.Labels[labelComponent] != "write-data-file" { - t.Fatalf("unexpected job metadata: namespace=%s labels=%#v", job.Namespace, job.Labels) - } - if got := container.Command; len(got) != 5 || got[4] != "/scroll/data/private/config.json" { - t.Fatalf("unexpected command: %#v", got) - } - if len(container.Env) != 1 || container.Env[0].Name != "DRUID_DATA_FILE_B64" || container.Env[0].Value != "e30=" { - t.Fatalf("unexpected env: %#v", container.Env) - } -} - func TestProcedureStatefulSetSpecBuildsPersistentWorkload(t *testing.T) { procedure := &domain.Procedure{ 
Image: "nginx:1.27", @@ -174,7 +148,7 @@ func TestProcedureStatefulSetSpecBuildsPersistentWorkload(t *testing.T) { } func TestPullJobSpecPropagatesPlainHTTPRegistryEnv(t *testing.T) { - job := pullJobSpec("druid", "pull", "scroll-pvc", "druid-client:test", "registry:5000/lab:1.0", "", true) + job := pullJobSpec("druid", "pull", "scroll-pvc", "druid-cli:test", "registry:5000/lab:1.0", "", "", true) env := job.Spec.Template.Spec.Containers[0].Env if len(env) != 1 || env[0].Name != "DRUID_REGISTRY_PLAIN_HTTP" || env[0].Value != "true" { @@ -182,12 +156,122 @@ func TestPullJobSpecPropagatesPlainHTTPRegistryEnv(t *testing.T) { } } +func TestPullJobSpecMountsRegistryConfigSecret(t *testing.T) { + job := pullJobSpec("druid", "pull", "scroll-pvc", "druid-cli:test", "registry.local/lab:1.0", "", "runtime-registry", false) + container := job.Spec.Template.Spec.Containers[0] + if !strings.Contains(strings.Join(container.Command, " "), "--config /tmp/druid-registry.json") { + t.Fatalf("command = %#v, want generated registry config", container.Command) + } + if len(container.Env) != 1 || container.Env[0].Name != registryConfigEnvName { + t.Fatalf("env = %#v", container.Env) + } + ref := container.Env[0].ValueFrom.SecretKeyRef + if ref == nil || ref.Name != "runtime-registry" || ref.Key != registryConfigSecretKey { + t.Fatalf("secret ref = %#v", ref) + } +} + +func TestWorkerPullJobSpecRunsDruidWorkerPull(t *testing.T) { + action := ports.RuntimeWorkerAction{ + Mode: ports.RuntimeWorkerModeUpdate, + RuntimeID: "deployment-123", + Artifact: "registry.local/lab:2.0", + MountPath: "/scroll", + CallbackURL: "http://druid-cli:8083/internal/v1/workers/deployment-123/complete", + CallbackToken: "secret-token", + } + job := workerPullJobSpec("druid", "worker-pull", "runtime-pvc", "druid-cli:test", action, "pull-secret", "runtime-registry", true) + container := job.Spec.Template.Spec.Containers[0] + command := strings.Join(container.Command, " ") + for _, want := range []string{"druid 
--config /tmp/druid-registry.json", "worker pull", "--mode update", "--runtime-id deployment-123", "--callback-url"} { + if !strings.Contains(command, want) { + t.Fatalf("command = %#v, want %s", container.Command, want) + } + } + if strings.Contains(command, "--action-id") { + t.Fatalf("command = %#v, should not contain --action-id", container.Command) + } + env := map[string]string{} + for _, item := range container.Env { + env[item.Name] = item.Value + } + if env["DRUID_WORKER_TOKEN"] != "secret-token" || env["DRUID_REGISTRY_PLAIN_HTTP"] != "true" { + t.Fatalf("env = %#v", container.Env) + } + if len(job.Spec.Template.Spec.ImagePullSecrets) != 1 || job.Spec.Template.Spec.ImagePullSecrets[0].Name != "pull-secret" { + t.Fatalf("image pull secrets = %#v", job.Spec.Template.Spec.ImagePullSecrets) + } +} + +func TestSpawnPullWorkerCreateUsesFinalPVCAndWorkerJob(t *testing.T) { + client := fake.NewSimpleClientset() + backend := NewWithClient(Config{Namespace: "druid", PullImage: "druid-cli:test"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), client, fakeHubble{}) + var jobs []*batchv1.Job + backend.jobLogRunner = func(ctx context.Context, job *batchv1.Job) ([]byte, error) { + jobs = append(jobs, job.DeepCopy()) + return nil, nil + } + action := ports.RuntimeWorkerAction{ + Mode: ports.RuntimeWorkerModeCreate, + RuntimeID: "deployment-123", + Artifact: "registry.local/lab:1.0", + RootRef: ref("druid", dataPVCName("deployment-123")), + MountPath: "/scroll", + CallbackURL: "http://druid-cli:8083/internal/v1/workers/deployment-123/complete", + CallbackToken: "secret-token", + } + if err := backend.SpawnPullWorker(context.Background(), action); err != nil { + t.Fatal(err) + } + pvcs, err := client.CoreV1().PersistentVolumeClaims("druid").List(context.Background(), metav1.ListOptions{}) + if err != nil { + t.Fatal(err) + } + if len(pvcs.Items) != 1 || pvcs.Items[0].Name != dataPVCName("deployment-123") { + t.Fatalf("pvcs = %#v, want final PVC", pvcs.Items) 
+ } + if len(jobs) != 1 { + t.Fatalf("jobs = %d, want 1", len(jobs)) + } + command := strings.Join(jobs[0].Spec.Template.Spec.Containers[0].Command, " ") + if !strings.Contains(command, "worker pull") || strings.Contains(command, "cat /scroll/scroll.yaml") || strings.Contains(command, "--action-id") { + t.Fatalf("command = %#v", jobs[0].Spec.Template.Spec.Containers[0].Command) + } +} + +func TestRegistryConfigSecretUsesDruidClientConfigShape(t *testing.T) { + client := fake.NewSimpleClientset() + backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), client, fakeHubble{}) + secretName, cleanup, err := backend.createRegistryConfigSecret(context.Background(), "druid", "artifact", []domain.RegistryCredential{{ + Host: "artifacts.druid.gg/user/scroll", + Username: "robot$scroll", + Password: "secret", + }}) + if err != nil { + t.Fatal(err) + } + defer cleanup() + secret, err := client.CoreV1().Secrets("druid").Get(context.Background(), secretName, metav1.GetOptions{}) + if err != nil { + t.Fatal(err) + } + var config struct { + Registries []domain.RegistryCredential `json:"registries"` + } + if err := json.Unmarshal(secret.Data[registryConfigSecretKey], &config); err != nil { + t.Fatal(err) + } + if len(config.Registries) != 1 || config.Registries[0].Host != "artifacts.druid.gg/user/scroll" || config.Registries[0].Username != "robot$scroll" { + t.Fatalf("config = %#v", config) + } +} + func TestExpectedPortsUsesHubbleFlowPresence(t *testing.T) { client := fake.NewSimpleClientset() backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), client, fakeHubble{hasFlow: true}) - dataRoot := ref("druid", "druid-static-web-data") + root := ref("druid", "druid-static-web-data") procedureName := "start" - service, err := serviceSpec("druid", dataRoot, procedureName, "http", domain.Port{Name: "http", Port: 80, Protocol: "tcp"}) + service, err := 
serviceSpec("druid", root, procedureName, "http", domain.Port{Name: "http", Port: 80, Protocol: "tcp"}) if err != nil { t.Fatal(err) } @@ -207,7 +291,7 @@ func TestExpectedPortsUsesHubbleFlowPresence(t *testing.T) { t.Fatal(err) } - statuses, err := backend.ExpectedPorts(dataRoot, map[string]*domain.CommandInstructionSet{ + statuses, err := backend.ExpectedPorts(root, map[string]*domain.CommandInstructionSet{ "start": {Procedures: []*domain.Procedure{{ Id: &procedureName, ExpectedPorts: []domain.ExpectedPort{{Name: "http", KeepAliveTraffic: "1b/5m"}}, @@ -234,8 +318,8 @@ func TestExpectedPortsUsesHubbleFlowPresence(t *testing.T) { func TestExpectedPortsDegradesWhenHubbleUnavailable(t *testing.T) { client := fake.NewSimpleClientset() backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), client, fakeHubble{err: errors.New("relay unavailable")}) - dataRoot := ref("druid", "druid-static-web-data") - service, err := serviceSpec("druid", dataRoot, "start", "http", domain.Port{Name: "http", Port: 80, Protocol: "tcp"}) + root := ref("druid", "druid-static-web-data") + service, err := serviceSpec("druid", root, "start", "http", domain.Port{Name: "http", Port: 80, Protocol: "tcp"}) if err != nil { t.Fatal(err) } @@ -243,7 +327,7 @@ func TestExpectedPortsDegradesWhenHubbleUnavailable(t *testing.T) { t.Fatal(err) } - statuses, err := backend.ExpectedPorts(dataRoot, map[string]*domain.CommandInstructionSet{ + statuses, err := backend.ExpectedPorts(root, map[string]*domain.CommandInstructionSet{ "start": {Procedures: []*domain.Procedure{{ExpectedPorts: []domain.ExpectedPort{{Name: "http", KeepAliveTraffic: "1b/5m"}}}}}, }, []domain.Port{{Name: "http", Port: 80, Protocol: "tcp"}}) if err != nil { @@ -263,10 +347,10 @@ func TestExpectedPortsDegradesWhenHubbleUnavailable(t *testing.T) { func TestRoutingTargetsReturnStableBackendServices(t *testing.T) { backend := NewWithClient(Config{Namespace: "druid"}, 
coreservices.NewConsoleManager(coreservices.NewLogManager()), fake.NewSimpleClientset(), fakeHubble{}) - dataRoot := ref("druid", "druid-static-web-data") + root := ref("druid", "druid-static-web-data") procedureID := "web" - targets, err := backend.RoutingTargets(dataRoot, map[string]*domain.CommandInstructionSet{ + targets, err := backend.RoutingTargets(root, map[string]*domain.CommandInstructionSet{ "serve": {Procedures: []*domain.Procedure{{ Id: &procedureID, ExpectedPorts: []domain.ExpectedPort{{Name: "http"}}, @@ -276,30 +360,75 @@ func TestRoutingTargetsReturnStableBackendServices(t *testing.T) { t.Fatal(err) } - if len(targets) != 1 { + if len(targets) != 2 { t.Fatalf("targets = %#v", targets) } - target := targets[0] - if target.Namespace != "druid" || target.ServiceName != serviceName(dataRoot, "web", "http") || target.ServicePort != 8080 { + var target domain.RuntimeRoutingTarget + var webdav domain.RuntimeRoutingTarget + for _, item := range targets { + if item.Name == "http" { + target = item + } + if item.Name == "webdav" { + webdav = item + } + } + if target.Namespace != "druid" || target.ServiceName != serviceName(root, "web", "http") || target.ServicePort != 8080 { t.Fatalf("target = %#v", target) } if target.Protocol != "http" || target.PortName != "http" || target.Procedure != "web" { t.Fatalf("target = %#v", target) } - if target.Selector[labelScrollID] != "druid-static-web-data" || target.Selector[labelProcedure] != "web" { + if target.Selector[labelScrollID] != "druid-static-web-data" || target.Selector[labelPortName] != "http" { t.Fatalf("selector = %#v", target.Selector) } + if webdav.ServiceName != serviceName(root, "dev", "webdav") || webdav.Port != 8084 || webdav.Protocol != "https" { + t.Fatalf("webdav target = %#v", webdav) + } +} + +func TestRoutingTargetsCollapseColdstarterAndRuntimePort(t *testing.T) { + backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), 
fake.NewSimpleClientset(), fakeHubble{}) + root := ref("druid", "druid-minecraft-data") + coldstart := "coldstart" + start := "start" + + targets, err := backend.RoutingTargets(root, map[string]*domain.CommandInstructionSet{ + "start": {Procedures: []*domain.Procedure{ + {Id: &coldstart, ExpectedPorts: []domain.ExpectedPort{{Name: "main"}}}, + {Id: &start, ExpectedPorts: []domain.ExpectedPort{{Name: "main"}}}, + }}, + }, []domain.Port{{Name: "main", Port: 25565, Protocol: "tcp"}}) + if err != nil { + t.Fatal(err) + } + + var mainTargets []domain.RuntimeRoutingTarget + for _, target := range targets { + if target.PortName == "main" { + mainTargets = append(mainTargets, target) + } + } + if len(mainTargets) != 1 { + t.Fatalf("main targets = %#v", mainTargets) + } + if mainTargets[0].Name != "main" || mainTargets[0].Procedure != "coldstart" { + t.Fatalf("main target = %#v", mainTargets[0]) + } + if mainTargets[0].Selector[labelPortName] != "main" { + t.Fatalf("selector = %#v", mainTargets[0].Selector) + } } func TestStopRuntimeDeletesWorkloadsButPreservesDataAndServices(t *testing.T) { client := fake.NewSimpleClientset() backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), client, fakeHubble{}) - dataRoot := ref("druid", "druid-static-web-data") + root := ref("druid", "druid-static-web-data") labels := baseLabels("druid-static-web-data") labels[labelProcedure] = "web" - jobName := jobName("proc", dataRoot, "web") - statefulSetName := statefulSetName(dataRoot, "web") - service, err := serviceSpec("druid", dataRoot, "web", "http", domain.Port{Name: "http", Port: 8080, Protocol: "tcp"}) + jobName := jobName("proc", root, "web") + statefulSetName := statefulSetName(root, "web") + service, err := serviceSpec("druid", root, "web", "http", domain.Port{Name: "http", Port: 8080, Protocol: "tcp"}) if err != nil { t.Fatal(err) } @@ -330,7 +459,7 @@ func TestStopRuntimeDeletesWorkloadsButPreservesDataAndServices(t 
*testing.T) { } } - if err := backend.StopRuntime(dataRoot); err != nil { + if err := backend.StopRuntime(root); err != nil { t.Fatal(err) } if _, err := client.BatchV1().Jobs("druid").Get(context.Background(), jobName, metav1.GetOptions{}); !apierrors.IsNotFound(err) { @@ -353,8 +482,8 @@ func TestStopRuntimeDeletesWorkloadsButPreservesDataAndServices(t *testing.T) { func TestDeleteRuntimePurgesServicesAndDataWhenRequested(t *testing.T) { client := fake.NewSimpleClientset() backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), client, fakeHubble{}) - dataRoot := ref("druid", "druid-static-web-data") - service, err := serviceSpec("druid", dataRoot, "web", "http", domain.Port{Name: "http", Port: 8080, Protocol: "tcp"}) + root := ref("druid", "druid-static-web-data") + service, err := serviceSpec("druid", root, "web", "http", domain.Port{Name: "http", Port: 8080, Protocol: "tcp"}) if err != nil { t.Fatal(err) } @@ -365,7 +494,7 @@ func TestDeleteRuntimePurgesServicesAndDataWhenRequested(t *testing.T) { t.Fatal(err) } - if err := backend.DeleteRuntime(dataRoot, true); err != nil { + if err := backend.DeleteRuntime(root, true); err != nil { t.Fatal(err) } @@ -378,7 +507,7 @@ func TestDeleteRuntimePurgesServicesAndDataWhenRequested(t *testing.T) { } func TestBackupAndRestoreJobSpecsUseRuntimePVCAndRegistryEnv(t *testing.T) { - backup := backupJobSpec("druid", "backup", "runtime-pvc", "druid-client:test", "registry.local/scroll:backup", "registry-secret", true) + backup := backupJobSpec("druid", "backup", "runtime-pvc", "druid-cli:test", "registry.local/scroll:backup", "registry-secret", "", true) if backup.Spec.Template.Spec.Containers[0].Command[1] != "push" { t.Fatalf("backup command = %#v", backup.Spec.Template.Spec.Containers[0].Command) } @@ -401,11 +530,16 @@ func TestBackupAndRestoreJobSpecsUseRuntimePVCAndRegistryEnv(t *testing.T) { } } -func TestMaterializationRequiresPullImage(t *testing.T) { +func 
TestSpawnPullWorkerRequiresPullImage(t *testing.T) { backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), fake.NewSimpleClientset(), fakeHubble{}) - _, err := backend.MaterializeScroll(context.Background(), "ghcr.io/example/scroll:latest", "") + err := backend.SpawnPullWorker(context.Background(), ports.RuntimeWorkerAction{ + Mode: ports.RuntimeWorkerModeCreate, + RuntimeID: "scroll", + Artifact: "ghcr.io/example/scroll:latest", + RootRef: ref("druid", dataPVCName("scroll")), + }) if err == nil { - t.Fatal("MaterializeScroll error = nil, want missing pull image error") + t.Fatal("SpawnPullWorker error = nil, want missing pull image error") } if !strings.Contains(err.Error(), "pull image is required") { t.Fatalf("error = %v, want pull image required", err) @@ -415,8 +549,8 @@ func TestMaterializationRequiresPullImage(t *testing.T) { func TestSignalDeletesPersistentStatefulSetAndPods(t *testing.T) { client := fake.NewSimpleClientset() backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), client, fakeHubble{}) - dataRoot := ref("druid", "druid-static-web-data") - name := statefulSetName(dataRoot, "start") + root := ref("druid", "druid-static-web-data") + name := statefulSetName(root, "start") labels := baseLabels("druid-static-web-data") labels[labelProcedure] = "start" if _, err := client.AppsV1().StatefulSets("druid").Create(context.Background(), &appsv1.StatefulSet{ @@ -430,7 +564,7 @@ func TestSignalDeletesPersistentStatefulSetAndPods(t *testing.T) { t.Fatal(err) } - if err := backend.Signal("", "start", "SIGKILL", dataRoot); err != nil { + if err := backend.Signal("", "start", "SIGKILL", root); err != nil { t.Fatal(err) } if _, err := client.AppsV1().StatefulSets("druid").Get(context.Background(), name, metav1.GetOptions{}); !apierrors.IsNotFound(err) { diff --git a/internal/runtime/kubernetes/state_store.go 
b/internal/runtime/kubernetes/state_store.go index 0fabc1c5..8d53e3f8 100644 --- a/internal/runtime/kubernetes/state_store.go +++ b/internal/runtime/kubernetes/state_store.go @@ -20,19 +20,19 @@ import ( const ( runtimeStateComponent = "runtime-state" - configMapKeyID = "id" - configMapKeyOwnerID = "owner_id" - configMapKeyArtifact = "artifact" - configMapKeyScrollRoot = "scroll_root" - configMapKeyDataRoot = "data_root" - configMapKeyScrollName = "scroll_name" - configMapKeyScrollYAML = "scroll_yaml" - configMapKeyStatus = "status" - configMapKeyLastError = "last_error" - configMapKeyCreatedAt = "created_at" - configMapKeyUpdatedAt = "updated_at" - configMapKeyCommandsJSON = "commands_json" - configMapKeyRoutingJSON = "routing_json" + configMapKeyID = "id" + configMapKeyOwnerID = "owner_id" + configMapKeyArtifact = "artifact" + configMapKeyArtifactDigest = "artifact_digest" + configMapKeyRoot = "root" + configMapKeyScrollName = "scroll_name" + configMapKeyScrollYAML = "scroll_yaml" + configMapKeyStatus = "status" + configMapKeyLastError = "last_error" + configMapKeyCreatedAt = "created_at" + configMapKeyUpdatedAt = "updated_at" + configMapKeyCommandsJSON = "commands_json" + configMapKeyRoutingJSON = "routing_json" ) type ConfigMapStateStore struct { @@ -64,11 +64,7 @@ func (s *ConfigMapStateStore) StateDir() string { return fmt.Sprintf("kubernetes:%s/configmaps", s.namespace) } -func (s *ConfigMapStateStore) ScrollRoot(id string) string { - return ref(s.namespace, dataPVCName(id)) -} - -func (s *ConfigMapStateStore) DataRoot(id string) string { +func (s *ConfigMapStateStore) Root(id string) string { return ref(s.namespace, dataPVCName(id)) } @@ -177,19 +173,19 @@ func runtimeScrollConfigMap(namespace string, scroll *domain.RuntimeScroll) (*co }, }, Data: map[string]string{ - configMapKeyID: scroll.ID, - configMapKeyOwnerID: scroll.OwnerID, - configMapKeyArtifact: scroll.Artifact, - configMapKeyScrollRoot: scroll.ScrollRoot, - configMapKeyDataRoot: scroll.DataRoot, 
- configMapKeyScrollName: scroll.ScrollName, - configMapKeyScrollYAML: scroll.ScrollYAML, - configMapKeyStatus: string(scroll.Status), - configMapKeyLastError: scroll.LastError, - configMapKeyCreatedAt: formatRuntimeTime(scroll.CreatedAt), - configMapKeyUpdatedAt: formatRuntimeTime(scroll.UpdatedAt), - configMapKeyCommandsJSON: string(commands), - configMapKeyRoutingJSON: string(routing), + configMapKeyID: scroll.ID, + configMapKeyOwnerID: scroll.OwnerID, + configMapKeyArtifact: scroll.Artifact, + configMapKeyArtifactDigest: scroll.ArtifactDigest, + configMapKeyRoot: scroll.Root, + configMapKeyScrollName: scroll.ScrollName, + configMapKeyScrollYAML: scroll.ScrollYAML, + configMapKeyStatus: string(scroll.Status), + configMapKeyLastError: scroll.LastError, + configMapKeyCreatedAt: formatRuntimeTime(scroll.CreatedAt), + configMapKeyUpdatedAt: formatRuntimeTime(scroll.UpdatedAt), + configMapKeyCommandsJSON: string(commands), + configMapKeyRoutingJSON: string(routing), }, }, nil } @@ -217,19 +213,19 @@ func runtimeScrollFromConfigMap(configMap *corev1.ConfigMap) (*domain.RuntimeScr id = configMap.Labels[labelScrollID] } scroll := &domain.RuntimeScroll{ - ID: id, - OwnerID: data[configMapKeyOwnerID], - Artifact: data[configMapKeyArtifact], - ScrollRoot: data[configMapKeyScrollRoot], - DataRoot: data[configMapKeyDataRoot], - ScrollName: data[configMapKeyScrollName], - ScrollYAML: data[configMapKeyScrollYAML], - Status: domain.RuntimeScrollStatus(data[configMapKeyStatus]), - LastError: data[configMapKeyLastError], - Routing: routing, - CreatedAt: parseRuntimeTime(data[configMapKeyCreatedAt]), - UpdatedAt: parseRuntimeTime(data[configMapKeyUpdatedAt]), - Commands: commands, + ID: id, + OwnerID: data[configMapKeyOwnerID], + Artifact: data[configMapKeyArtifact], + ArtifactDigest: data[configMapKeyArtifactDigest], + Root: data[configMapKeyRoot], + ScrollName: data[configMapKeyScrollName], + ScrollYAML: data[configMapKeyScrollYAML], + Status: 
domain.RuntimeScrollStatus(data[configMapKeyStatus]), + LastError: data[configMapKeyLastError], + Routing: routing, + CreatedAt: parseRuntimeTime(data[configMapKeyCreatedAt]), + UpdatedAt: parseRuntimeTime(data[configMapKeyUpdatedAt]), + Commands: commands, } if scroll.Status == "" { scroll.Status = domain.RuntimeScrollStatusCreated diff --git a/internal/runtime/kubernetes/state_store_test.go b/internal/runtime/kubernetes/state_store_test.go index a29b1829..b7d5e7d2 100644 --- a/internal/runtime/kubernetes/state_store_test.go +++ b/internal/runtime/kubernetes/state_store_test.go @@ -16,8 +16,7 @@ func TestConfigMapStateStoreRoundTripsRuntimeScroll(t *testing.T) { scroll := &domain.RuntimeScroll{ ID: "container-lab", Artifact: "registry.local/container-lab:1.0", - ScrollRoot: ref("druid", "druid-container-lab-data"), - DataRoot: ref("druid", "druid-container-lab-data"), + Root: ref("druid", "druid-container-lab-data"), ScrollName: "container-lab", ScrollYAML: "name: container-lab\n", Status: domain.RuntimeScrollStatusCreated, @@ -34,7 +33,7 @@ func TestConfigMapStateStoreRoundTripsRuntimeScroll(t *testing.T) { if err != nil { t.Fatal(err) } - if got.Artifact != scroll.Artifact || got.ScrollRoot != scroll.ScrollRoot || got.ScrollYAML != scroll.ScrollYAML { + if got.Artifact != scroll.Artifact || got.Root != scroll.Root || got.ScrollYAML != scroll.ScrollYAML { t.Fatalf("stored scroll mismatch: %#v", got) } if got.Commands["verify"].Status != domain.ScrollLockStatusError { @@ -79,8 +78,7 @@ func TestConfigMapStateStoreDuplicateCreateReturnsConflict(t *testing.T) { scroll := &domain.RuntimeScroll{ ID: "duplicate", Artifact: "local", - ScrollRoot: ref("druid", "druid-duplicate-data"), - DataRoot: ref("druid", "druid-duplicate-data"), + Root: ref("druid", "druid-duplicate-data"), ScrollName: "duplicate", ScrollYAML: "name: duplicate\n", } @@ -106,10 +104,7 @@ func TestConfigMapStateStoreMissingScrollReturnsNotFound(t *testing.T) { func 
TestConfigMapStateStoreDerivesKubernetesRoots(t *testing.T) { store := NewConfigMapStateStoreWithClient("druid", fake.NewSimpleClientset()) want := "k8s://druid/druid-container-lab-data" - if got := store.ScrollRoot("container-lab"); got != want { - t.Fatalf("ScrollRoot = %s, want %s", got, want) - } - if got := store.DataRoot("container-lab"); got != want { - t.Fatalf("DataRoot = %s, want %s", got, want) + if got := store.Root("container-lab"); got != want { + t.Fatalf("Root = %s, want %s", got, want) } } diff --git a/internal/runtime/runtime_test.go b/internal/runtime/runtime_test.go index 502c3337..f04d1b65 100644 --- a/internal/runtime/runtime_test.go +++ b/internal/runtime/runtime_test.go @@ -1,18 +1,17 @@ package runtime_test import ( - "context" - "os" "path/filepath" "reflect" "testing" + "github.com/docker/docker/api/types/mount" "github.com/highcard-dev/daemon/internal/core/domain" "github.com/highcard-dev/daemon/internal/runtime/docker" ) func TestDockerRunCommandBuildsCanonicalMounts(t *testing.T) { - dataRoot := t.TempDir() + root := t.TempDir() procedure := &domain.Procedure{ Image: "alpine:3.20", Command: []string{"sh", "-c", "echo ok"}, @@ -25,7 +24,7 @@ func TestDockerRunCommandBuildsCanonicalMounts(t *testing.T) { Mounts: []domain.Mount{{Path: "/cache", SubPath: "cache"}}, } - spec, err := docker.BuildContainerSpec("start", procedure, dataRoot, []domain.Port{{Name: "http", Port: 8080, Protocol: "http"}}) + spec, err := docker.BuildContainerSpec("start", procedure, root, []domain.Port{{Name: "http", Port: 8080, Protocol: "http"}}) if err != nil { t.Fatal(err) } @@ -42,29 +41,30 @@ func TestDockerRunCommandBuildsCanonicalMounts(t *testing.T) { if !reflect.DeepEqual(spec.Env, []string{"A=one", "B=two"}) { t.Fatalf("unexpected env: %#v", spec.Env) } - expectedBinds := []string{ - filepath.Join(dataRoot, "data", "cache") + ":/cache", + expectedMounts := []mount.Mount{ + { + Type: mount.TypeBind, + Source: filepath.Join(root, "data", "cache"), + Target: 
"/cache", + BindOptions: &mount.BindOptions{CreateMountpoint: true}, + }, } - if !reflect.DeepEqual(spec.Binds, expectedBinds) { - t.Fatalf("unexpected binds:\nexpected: %#v\nactual: %#v", expectedBinds, spec.Binds) + if !reflect.DeepEqual(spec.Mounts, expectedMounts) { + t.Fatalf("unexpected mounts:\nexpected: %#v\nactual: %#v", expectedMounts, spec.Mounts) } if len(spec.PortBindings) != 1 { t.Fatalf("expected one port binding, got %#v", spec.PortBindings) } - - if _, err := os.Stat(filepath.Join(dataRoot, "data", "cache")); err != nil { - t.Fatalf("expected mount subpath to be created: %v", err) - } } func TestDockerBuildContainerSpecUsesProvidedRuntimeEnv(t *testing.T) { - dataRoot := t.TempDir() + root := t.TempDir() spec, err := docker.BuildContainerSpecWithEnv("start", &domain.Procedure{ Image: "alpine:3.20", Env: map[string]string{ "PROCEDURE_ONLY": "ignored", }, - }, dataRoot, nil, map[string]string{ + }, root, nil, map[string]string{ "DRUID_PORT_HTTP": "8080", }) if err != nil { @@ -75,31 +75,57 @@ func TestDockerBuildContainerSpecUsesProvidedRuntimeEnv(t *testing.T) { } } -func TestDockerRunCommandDefaultsMountSubPathToDataRoot(t *testing.T) { - dataRoot := t.TempDir() +func TestDockerRunCommandDefaultsMountSubPathToRoot(t *testing.T) { + root := t.TempDir() spec, err := docker.BuildContainerSpec("start", &domain.Procedure{ Image: "alpine:3.20", Mounts: []domain.Mount{{Path: "/server"}}, Command: []string{"true"}, - }, dataRoot, nil) + }, root, nil) + if err != nil { + t.Fatal(err) + } + expectedMounts := []mount.Mount{{ + Type: mount.TypeBind, + Source: filepath.Join(root, "data"), + Target: "/server", + BindOptions: &mount.BindOptions{CreateMountpoint: true}, + }} + if !reflect.DeepEqual(spec.Mounts, expectedMounts) { + t.Fatalf("unexpected mounts:\nexpected: %#v\nactual: %#v", expectedMounts, spec.Mounts) + } +} + +func TestDockerContainerSpecUsesVolumeSubpath(t *testing.T) { + spec, err := docker.BuildContainerSpec("start", &domain.Procedure{ + Image: 
"alpine:3.20", + Mounts: []domain.Mount{{Path: "/server", SubPath: "public", ReadOnly: true}}, + Command: []string{"true"}, + }, "docker-volume://druid-scroll-data", nil) if err != nil { t.Fatal(err) } - expectedBinds := []string{filepath.Join(dataRoot, "data") + ":/server"} - if !reflect.DeepEqual(spec.Binds, expectedBinds) { - t.Fatalf("unexpected binds:\nexpected: %#v\nactual: %#v", expectedBinds, spec.Binds) + expectedMounts := []mount.Mount{{ + Type: mount.TypeVolume, + Source: "druid-scroll-data", + Target: "/server", + ReadOnly: true, + VolumeOptions: &mount.VolumeOptions{Subpath: "data/public"}, + }} + if !reflect.DeepEqual(spec.Mounts, expectedMounts) { + t.Fatalf("unexpected mounts:\nexpected: %#v\nactual: %#v", expectedMounts, spec.Mounts) } } func TestDockerBuildContainerSpecSupportsTTY(t *testing.T) { - dataRoot := t.TempDir() + root := t.TempDir() spec, err := docker.BuildContainerSpec("build.0", &domain.Procedure{ Image: domain.DefaultExecImage, Command: []string{"bash", "-lc", "echo ok"}, WorkingDir: "/work", TTY: true, - }, dataRoot, nil) + }, root, nil) if err != nil { t.Fatal(err) } @@ -110,60 +136,3 @@ func TestDockerBuildContainerSpecSupportsTTY(t *testing.T) { t.Fatalf("unexpected image: %s", spec.Image) } } - -func TestDockerReadScrollFile(t *testing.T) { - scrollRoot := t.TempDir() - want := []byte("name: test\n") - if err := os.WriteFile(filepath.Join(scrollRoot, "scroll.yaml"), want, 0644); err != nil { - t.Fatal(err) - } - backend := &docker.Backend{} - got, err := backend.ReadScrollFile(scrollRoot) - if err != nil { - t.Fatal(err) - } - if string(got) != string(want) { - t.Fatalf("scroll yaml = %q, want %q", got, want) - } -} - -func TestDockerReadDataFileScopesToDataRoot(t *testing.T) { - dataRoot := t.TempDir() - want := []byte("bundle") - path := filepath.Join(dataRoot, "data", "private", "dist") - if err := os.MkdirAll(path, 0755); err != nil { - t.Fatal(err) - } - if err := os.WriteFile(filepath.Join(path, "app.wasm"), want, 0644); 
err != nil { - t.Fatal(err) - } - backend := &docker.Backend{} - got, err := backend.ReadDataFile(context.Background(), dataRoot, "/data/private/dist/app.wasm") - if err != nil { - t.Fatal(err) - } - if string(got) != string(want) { - t.Fatalf("data file = %q, want %q", got, want) - } - if _, err := backend.ReadDataFile(context.Background(), dataRoot, "../escape"); err == nil { - t.Fatal("expected traversal path to be rejected") - } -} - -func TestDockerWriteDataFileScopesToDataRoot(t *testing.T) { - dataRoot := t.TempDir() - backend := &docker.Backend{} - if err := backend.WriteDataFile(context.Background(), dataRoot, "data/private/config.json", []byte("{}")); err != nil { - t.Fatal(err) - } - got, err := os.ReadFile(filepath.Join(dataRoot, "data", "private", "config.json")) - if err != nil { - t.Fatal(err) - } - if string(got) != "{}" { - t.Fatalf("written data = %q, want {}", got) - } - if err := backend.WriteDataFile(context.Background(), dataRoot, "../escape", []byte("bad")); err == nil { - t.Fatal("expected traversal path to be rejected") - } -} diff --git a/internal/utils/random.go b/internal/utils/random.go index 3452af90..2361d45c 100644 --- a/internal/utils/random.go +++ b/internal/utils/random.go @@ -1,8 +1,8 @@ package utils import ( + "crypto/rand" "encoding/base64" - "math/rand" ) // GenerateRandomStringURLSafe returns a URL-safe, base64 encoded diff --git a/scripts/build_coldstarter_image.sh b/scripts/build_coldstarter_image.sh deleted file mode 100755 index b1d23e4f..00000000 --- a/scripts/build_coldstarter_image.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -IMAGE="${IMAGE:-druid-coldstarter:local}" -VERSION="${VERSION:-local}" -ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" - -echo "Building local coldstarter image: ${IMAGE}" -docker build \ - --file "${ROOT_DIR}/Dockerfile.coldstarter" \ - --build-arg "VERSION=${VERSION}" \ - --tag "${IMAGE}" \ - "${ROOT_DIR}" - -echo "Built ${IMAGE}" diff --git a/test/integration/docker/docker_cli_test.go b/test/integration/docker/docker_cli_test.go index 7394d8a2..55dbc642 100644 --- a/test/integration/docker/docker_cli_test.go +++ b/test/integration/docker/docker_cli_test.go @@ -4,6 +4,8 @@ package docker_test import ( "fmt" + "io" + "net/http" "os" "path/filepath" "strings" @@ -18,24 +20,32 @@ func TestDockerBackendCLIComplexLifecycle(t *testing.T) { bins := e2e.BuildBinaries(t) port := e2e.FreePort(t) routePort := e2e.FreePort(t) + callbackPort := e2e.FreePort(t) name := fmt.Sprintf("docker-cli-%d", time.Now().UnixNano()) fixture := e2e.WriteFixture(t, filepath.Join(t.TempDir(), "scroll"), name, port, routePort) + workerImage := e2e.BuildDockerImage(t, "druid-cli-e2e:"+name) + containerHost := e2e.DockerHostAddress(t) socket := filepath.Join(os.TempDir(), fmt.Sprintf("druid-docker-%d.sock", time.Now().UnixNano())) t.Cleanup(func() { _ = os.Remove(socket) }) stateDir := filepath.Join(t.TempDir(), "state") - logs := e2e.StartDaemon(t, bins, "docker", socket, stateDir, nil, nil) + logs := e2e.StartDaemon(t, bins, "docker", socket, stateDir, []string{ + "--docker-worker-image", workerImage, + "--docker-storage", "bind", + "--docker-bind-root", filepath.Join(stateDir, "scrolls"), + "--worker-callback-listen", fmt.Sprintf(":%d", callbackPort), + "--worker-callback-url", fmt.Sprintf("http://%s:%d", containerHost, callbackPort), + }, nil) t.Cleanup(func() { if t.Failed() { t.Logf("druid daemon logs:\n%s", logs.String()) } }) - created := e2e.RunClientJSON[e2e.RuntimeScroll](t, bins, socket, "create", "--no-start", "--state-dir", stateDir, fixture.Dir, fixture.Name) + created := e2e.RunClientJSON[e2e.RuntimeScroll](t, bins, socket, "create", "-p", fmt.Sprintf("%d:http", fixture.RoutePort), 
fixture.Dir, fixture.Name) if created.Status != "created" { t.Fatalf("created status = %s, want created", created.Status) } - e2e.RunClient(t, bins, socket, "routing", "apply", created.ID, "--file", fixture.RoutingFile) started := e2e.RunClientJSON[e2e.RuntimeScroll](t, bins, socket, "start", created.ID) if started.Status != "running" { @@ -52,11 +62,11 @@ func TestDockerBackendCLIComplexLifecycle(t *testing.T) { assertPortBound(t, statuses, fixture) e2e.RunClient(t, bins, socket, "run", created.ID, "record") - dataRoot := filepath.Join(stateDir, "scrolls", created.ID, "data") - if got := readFile(t, filepath.Join(dataRoot, "finite.txt")); !strings.Contains(got, "finite-ok") { + root := strings.TrimPrefix(created.Root, "docker-bind://") + if got := readDockerRootFile(t, root, "data/finite.txt"); !strings.Contains(got, "finite-ok") { t.Fatalf("finite file = %q, want finite-ok", got) } - recordEnv := e2e.ParseEnv(readFile(t, filepath.Join(dataRoot, "record-env.txt"))) + recordEnv := e2e.ParseEnv(readDockerRootFile(t, root, "data/record-env.txt")) e2e.AssertRuntimeEnv(t, recordEnv, fixture, "docker", created.ID) if recordEnv["USER_ENV"] != "finite" { t.Fatalf("record USER_ENV = %q, want finite", recordEnv["USER_ENV"]) @@ -70,6 +80,130 @@ func TestDockerBackendCLIComplexLifecycle(t *testing.T) { } } +func TestDockerBackendVolumeStorageWorkerLifecycleBackupRestore(t *testing.T) { + e2e.RequireDocker(t) + bins := e2e.BuildBinaries(t) + port := e2e.FreePort(t) + routePort := e2e.FreePort(t) + callbackPort := e2e.FreePort(t) + publicPort := e2e.FreePort(t) + managementPort := e2e.FreePort(t) + registryPort := e2e.StartRegistry(t) + containerHost := e2e.DockerHostAddress(t) + name := fmt.Sprintf("docker-volume-%d", time.Now().UnixNano()) + fixture := e2e.WriteFixture(t, filepath.Join(t.TempDir(), "scroll"), name, port, routePort) + workerImage := e2e.BuildDockerImage(t, "druid-cli-e2e:"+name) + + pushArtifact := fmt.Sprintf("127.0.0.1:%d/druid-e2e/%s:v1", registryPort, 
name) + runtimeArtifact := fmt.Sprintf("%s:%d/druid-e2e/%s:v1", containerHost, registryPort, name) + backupArtifact := fmt.Sprintf("%s:%d/druid-e2e/%s-backup:v1", containerHost, registryPort, name) + e2e.RunEnv(t, []string{"DRUID_REGISTRY_PLAIN_HTTP=true", "HOME=" + bins.Home}, bins.Druid, "push", pushArtifact, fixture.Dir) + + socket := filepath.Join(os.TempDir(), fmt.Sprintf("druid-docker-volume-%d.sock", time.Now().UnixNano())) + t.Cleanup(func() { _ = os.Remove(socket) }) + stateDir := filepath.Join(t.TempDir(), "state") + logs := e2e.StartDaemon(t, bins, "docker", socket, stateDir, []string{ + "--docker-worker-image", workerImage, + "--docker-volume-prefix", "druid-e2e", + "--worker-callback-listen", fmt.Sprintf(":%d", callbackPort), + "--worker-callback-url", fmt.Sprintf("http://%s:%d", containerHost, callbackPort), + "--listen", fmt.Sprintf(":%d", managementPort), + "--worker-daemon-url", fmt.Sprintf("http://%s:%d", containerHost, managementPort), + "--public-listen", fmt.Sprintf(":%d", publicPort), + }, []string{"DRUID_REGISTRY_PLAIN_HTTP=true"}) + t.Cleanup(func() { + if t.Failed() { + t.Logf("druid daemon logs:\n%s", logs.String()) + } + }) + + created := e2e.RunClientJSON[e2e.RuntimeScroll](t, bins, socket, + "create", + "-p", fmt.Sprintf("%d:http", fixture.RoutePort), + "-p", fmt.Sprintf("%d:webdav", publicPort), + runtimeArtifact, + fixture.Name, + ) + if created.Status != "created" { + t.Fatalf("created status = %s, want created", created.Status) + } + if !strings.HasPrefix(created.Root, "docker-volume://druid-e2e-") { + t.Fatalf("root = %s, want docker volume ref", created.Root) + } + started := e2e.RunClientJSON[e2e.RuntimeScroll](t, bins, socket, "start", created.ID) + if started.Status != "running" { + t.Fatalf("started status = %s, want running", started.Status) + } + body := e2e.WaitHTTP(t, fmt.Sprintf("http://127.0.0.1:%d/env.txt", fixture.Port)) + env := e2e.ParseEnv(body) + e2e.AssertRuntimeEnv(t, env, fixture, "docker", created.ID) + + 
e2e.RunClient(t, bins, socket, "dev", created.ID, "--watch", "data", "--command", "record") + + finiteURL := fmt.Sprintf("http://127.0.0.1:%d/webdav/data/finite.txt", publicPort) + if got := e2e.WaitHTTP(t, finiteURL); !strings.Contains(got, "finite-ok") { + t.Fatalf("finite file = %q, want finite-ok", got) + } + + e2e.UnixJSONRequest(t, socket, http.MethodPost, "/api/v1/scrolls/"+created.ID+"/backup", fmt.Sprintf(`{"artifact":%q}`, backupArtifact)) + indexURL := fmt.Sprintf("http://127.0.0.1:%d/webdav/data/public/index.txt", publicPort) + httpPut(t, indexURL, "mutated\n") + if got := e2e.WaitHTTP(t, fmt.Sprintf("http://127.0.0.1:%d/index.txt", fixture.Port)); !strings.Contains(got, "mutated") { + t.Fatalf("mutated index = %q, want mutated", got) + } + + e2e.UnixJSONRequest(t, socket, http.MethodPost, "/api/v1/scrolls/"+created.ID+"/restore", fmt.Sprintf(`{"artifact":%q,"restart":true}`, backupArtifact)) + if got := e2e.WaitHTTP(t, fmt.Sprintf("http://127.0.0.1:%d/index.txt", fixture.Port)); !strings.Contains(got, "healthy") { + t.Fatalf("restored index = %q, want healthy", got) + } + e2e.UnixJSONRequest(t, socket, http.MethodDelete, "/api/v1/scrolls/"+created.ID+"?purge_data=true", "") +} + +func TestDockerBackendColdstarterFrontsRuntime(t *testing.T) { + e2e.RequireDocker(t) + bins := e2e.BuildBinaries(t) + runtimePort := e2e.FreePort(t) + publicPort := e2e.FreePort(t) + callbackPort := e2e.FreePort(t) + name := fmt.Sprintf("docker-coldstart-%d", time.Now().UnixNano()) + image := e2e.BuildDockerImage(t, "druid-coldstart-e2e:"+name) + fixtureDir := writeColdstarterFixture(t, filepath.Join(t.TempDir(), "scroll"), name, image, runtimePort) + containerHost := e2e.DockerHostAddress(t) + + socket := filepath.Join(os.TempDir(), fmt.Sprintf("druid-coldstart-%d.sock", time.Now().UnixNano())) + t.Cleanup(func() { _ = os.Remove(socket) }) + stateDir := filepath.Join(t.TempDir(), "state") + logs := e2e.StartDaemon(t, bins, "docker", socket, stateDir, []string{ + 
"--docker-worker-image", image, + "--docker-storage", "bind", + "--docker-bind-root", filepath.Join(stateDir, "scrolls"), + "--worker-callback-listen", fmt.Sprintf(":%d", callbackPort), + "--worker-callback-url", fmt.Sprintf("http://%s:%d", containerHost, callbackPort), + }, nil) + t.Cleanup(func() { + if t.Failed() { + t.Logf("druid daemon logs:\n%s", logs.String()) + } + }) + + created := e2e.RunClientJSON[e2e.RuntimeScroll](t, bins, socket, "create", "-p", fmt.Sprintf("%d:http", publicPort), fixtureDir, name) + if created.Status != "created" { + t.Fatalf("created status = %s, want created", created.Status) + } + started := e2e.RunClientJSON[e2e.RuntimeScroll](t, bins, socket, "start", created.ID) + if started.Status != "running" { + t.Fatalf("started status = %s, want running", started.Status) + } + if got := e2e.WaitHTTP(t, fmt.Sprintf("http://127.0.0.1:%d/index.txt", runtimePort)); !strings.Contains(got, "cold-started") { + t.Fatalf("served body = %q, want cold-started", got) + } + root := strings.TrimPrefix(created.Root, "docker-bind://") + if got := readDockerRootFile(t, root, ".coldstarter-finished.json"); !strings.Contains(got, "http") { + t.Fatalf("coldstarter status file = %q, want port status", got) + } + e2e.RunClient(t, bins, socket, "delete", created.ID) +} + func assertPortBound(t *testing.T, statuses []e2e.RuntimePortStatus, fixture e2e.Fixture) { t.Helper() for _, status := range statuses { @@ -86,13 +220,77 @@ func assertPortBound(t *testing.T, statuses []e2e.RuntimePortStatus, fixture e2e t.Fatalf("http port for %s not found in %#v", fixture.ServeProc, statuses) } -func readFile(t *testing.T, path string) string { +func writeColdstarterFixture(t *testing.T, dir string, name string, image string, port int) string { t.Helper() - data, err := os.ReadFile(path) - if err != nil { + yaml := fmt.Sprintf(`name: %s +desc: Coldstarter integration fixture +version: 0.1.0 +app_version: "test" +serve: start +ports: + - name: http + protocol: http + port: %d 
+ mandatory: true + sleep_handler: generic +commands: + start: + run: restart + procedures: + - id: coldstart + image: %s + expectedPorts: + - name: http + keepAliveTraffic: 1b/5m + mounts: + - path: /runtime + sub_path: . + command: + - druid-coldstarter + - --root + - /runtime + - --status-file + - .coldstarter-finished.json + - id: web + image: busybox:1.36 + expectedPorts: + - name: http + keepAliveTraffic: 1b/5m + mounts: + - path: /site + sub_path: public + command: + - sh + - -c + - >- + set -eu; + mkdir -p /site; + printf 'cold-started\n' > /site/index.txt; + httpd -f -p %d -h /site +`, name, port, image, port) + if err := os.MkdirAll(dir, 0755); err != nil { t.Fatal(err) } - return string(data) + if err := os.WriteFile(filepath.Join(dir, "scroll.yaml"), []byte(yaml), 0644); err != nil { + t.Fatal(err) + } + return dir +} + +func readDockerRootFile(t *testing.T, root string, path string) string { + t.Helper() + deadline := time.Now().Add(30 * time.Second) + var last string + for time.Now().Before(deadline) { + out := e2e.Run(t, "docker", "run", "--rm", "-v", root+":/runtime:ro", "busybox:1.36", "sh", "-c", "cat /runtime/"+path+" 2>&1 || true") + if !strings.Contains(out, "No such file") { + return out + } + last = out + time.Sleep(250 * time.Millisecond) + } + t.Fatalf("read docker root %s:%s: %s", root, path, last) + return "" } func waitDockerContainersGone(t *testing.T, labels ...string) { @@ -113,3 +311,20 @@ func waitDockerContainersGone(t *testing.T, labels ...string) { } t.Fatalf("docker containers still exist for labels %v", labels) } + +func httpPut(t *testing.T, url string, body string) { + t.Helper() + req, err := http.NewRequest(http.MethodPut, url, strings.NewReader(body)) + if err != nil { + t.Fatal(err) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.StatusCode >= 400 { + data, _ := io.ReadAll(resp.Body) + t.Fatalf("PUT %s failed with %d: %s", url, resp.StatusCode, data) + 
} +} diff --git a/test/integration/example_test.go b/test/integration/example_test.go index 65ad597c..e2c24504 100644 --- a/test/integration/example_test.go +++ b/test/integration/example_test.go @@ -116,7 +116,7 @@ func TestExamples(t *testing.T) { go queueManager.Work() - err = queueManager.AddAndRememberItem("start") + err = queueManager.AddTempItem("start") if err != nil { t.Error(err) @@ -145,11 +145,12 @@ func TestExamples(t *testing.T) { return } - err = queueManager.AddShutdownItem("stop") + err = queueManager.AddTempItemWithWait("stop") if err != nil { t.Error(err) return } + queueManager.Shutdown() if config.TestAddress != "" { err = test_utils.ConnectionTest(config.TestAddress, false) diff --git a/test/integration/internal/e2e/harness.go b/test/integration/internal/e2e/harness.go index d3da8ce8..dcbac7e8 100644 --- a/test/integration/internal/e2e/harness.go +++ b/test/integration/internal/e2e/harness.go @@ -19,26 +19,25 @@ import ( ) type Binaries struct { - Druid string - Client string - Home string + Druid string + Home string } type Fixture struct { - Dir string - Name string - ServeProc string - RecordProc string - Port int - RouteHost string - RouteURL string - RoutePort int - RoutingFile string + Dir string + Name string + ServeProc string + RecordProc string + Port int + RouteHost string + RouteURL string + RoutePort int } type RuntimeScroll struct { ID string `json:"id"` Status string `json:"status"` + Root string `json:"root"` } type RuntimePortStatus struct { @@ -94,12 +93,10 @@ func BuildBinaries(t *testing.T) Binaries { t.Fatal(err) } bins := Binaries{ - Druid: filepath.Join(binDir, "druid"), - Client: filepath.Join(binDir, "druid-client"), - Home: home, + Druid: filepath.Join(binDir, "druid"), + Home: home, } build(t, "./apps/druid", bins.Druid) - build(t, "./apps/druid-client", bins.Client) return bins } @@ -121,7 +118,7 @@ func StartDaemon(t *testing.T, bins Binaries, runtimeName string, socket string, t.Fatal(err) } ctx, cancel := 
context.WithCancel(context.Background()) - args := []string{"serve", "--runtime", runtimeName, "--socket", socket, "--state-dir", stateDir} + args := []string{"daemon", "--runtime", runtimeName, "--socket", socket, "--state-dir", stateDir} args = append(args, extraArgs...) cmd := exec.CommandContext(ctx, bins.Druid, args...) cmd.Dir = RepoRoot(t) @@ -160,12 +157,12 @@ func RunClient(t *testing.T, bins Binaries, socket string, args ...string) strin config := filepath.Join(bins.Home, "client.yaml") envFile := filepath.Join(bins.Home, ".env") fullArgs := append([]string{"--daemon-socket", socket, "--config", config, "--env-file", envFile}, args...) - cmd := exec.CommandContext(ctx, bins.Client, fullArgs...) + cmd := exec.CommandContext(ctx, bins.Druid, fullArgs...) cmd.Dir = RepoRoot(t) cmd.Env = append(os.Environ(), "HOME="+bins.Home) out, err := cmd.CombinedOutput() if err != nil { - t.Fatalf("druid-client %s failed: %v\n%s", strings.Join(args, " "), err, out) + t.Fatalf("druid %s failed: %v\n%s", strings.Join(args, " "), err, out) } return string(out) } @@ -175,7 +172,7 @@ func RunClientJSON[T any](t *testing.T, bins Binaries, socket string, args ...st out := RunClient(t, bins, socket, args...) 
var value T if err := json.Unmarshal([]byte(out), &value); err != nil { - t.Fatalf("decode druid-client %s JSON: %v\n%s", strings.Join(args, " "), err, out) + t.Fatalf("decode druid %s JSON: %v\n%s", strings.Join(args, " "), err, out) } return value } @@ -195,7 +192,7 @@ func WriteFixture(t *testing.T, dir string, name string, port int, routePort int suffix := strings.ToLower(strings.ReplaceAll(name, "_", "-")) serveProc := "web-" + suffix recordProc := "record-" + suffix - routeHost := name + ".runtime.test" + routeHost := "localhost" routeURL := fmt.Sprintf("http://%s:%d", routeHost, routePort) yaml := fmt.Sprintf(`name: %s desc: CLI integration fixture with persistent data, a finite command, declared ports, and runtime env checks @@ -255,33 +252,15 @@ commands: if err := os.WriteFile(filepath.Join(dir, "scroll.yaml"), []byte(yaml), 0644); err != nil { t.Fatal(err) } - routingFile := filepath.Join(dir, "routing.json") - routing := fmt.Sprintf(`{ - "assignments": [ - { - "name": "%s-http", - "port_name": "http", - "host": "%s", - "external_ip": "127.0.0.1", - "public_port": %d, - "url": "%s", - "protocol": "http" - } - ] -}`, serveProc, routeHost, routePort, routeURL) - if err := os.WriteFile(routingFile, []byte(routing), 0644); err != nil { - t.Fatal(err) - } return Fixture{ - Dir: dir, - Name: name, - ServeProc: serveProc, - RecordProc: recordProc, - Port: port, - RouteHost: routeHost, - RouteURL: routeURL, - RoutePort: routePort, - RoutingFile: routingFile, + Dir: dir, + Name: name, + ServeProc: serveProc, + RecordProc: recordProc, + Port: port, + RouteHost: routeHost, + RouteURL: routeURL, + RoutePort: routePort, } } @@ -375,14 +354,140 @@ func RequireDocker(t *testing.T) { } func Run(t *testing.T, name string, args ...string) string { + t.Helper() + return RunEnv(t, nil, name, args...) 
+} + +func RunEnv(t *testing.T, env []string, name string, args ...string) string { t.Helper() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() cmd := exec.CommandContext(ctx, name, args...) cmd.Dir = RepoRoot(t) + if len(env) > 0 { + cmd.Env = append(os.Environ(), env...) + } out, err := cmd.CombinedOutput() if err != nil { t.Fatalf("%s %s failed: %v\n%s", name, strings.Join(args, " "), err, out) } return string(out) } + +func BuildDockerImage(t *testing.T, tag string) string { + t.Helper() + contextDir := t.TempDir() + druid := filepath.Join(contextDir, "druid") + coldstarter := filepath.Join(contextDir, "druid-coldstarter") + buildCtx, buildCancel := context.WithTimeout(context.Background(), 3*time.Minute) + defer buildCancel() + buildCmd := exec.CommandContext(buildCtx, "go", "build", "-o", druid, "./apps/druid") + buildCmd.Dir = RepoRoot(t) + buildCmd.Env = append(os.Environ(), "CGO_ENABLED=0", "GOOS=linux", "GOARCH="+runtime.GOARCH) + if out, err := buildCmd.CombinedOutput(); err != nil { + t.Fatalf("linux druid build failed: %v\n%s", err, out) + } + buildCmd = exec.CommandContext(buildCtx, "go", "build", "-o", coldstarter, "./apps/druid-coldstarter") + buildCmd.Dir = RepoRoot(t) + buildCmd.Env = append(os.Environ(), "CGO_ENABLED=0", "GOOS=linux", "GOARCH="+runtime.GOARCH) + if out, err := buildCmd.CombinedOutput(); err != nil { + t.Fatalf("linux druid-coldstarter build failed: %v\n%s", err, out) + } + dockerfile := `FROM alpine:3.20 +RUN apk add --no-cache ca-certificates +COPY druid druid-coldstarter /usr/bin/ +COPY entrypoint.sh /entrypoint.sh +RUN chmod +x /entrypoint.sh +ENTRYPOINT ["/entrypoint.sh"] +` + if err := os.WriteFile(filepath.Join(contextDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + t.Fatal(err) + } + entrypoint := `#!/bin/sh +if [ "$1" = "druid-coldstarter" ] || [ "$1" = "/usr/bin/druid-coldstarter" ]; then + exec "$@" +fi +exec druid "$@" +` + if err := 
os.WriteFile(filepath.Join(contextDir, "entrypoint.sh"), []byte(entrypoint), 0755); err != nil { + t.Fatal(err) + } + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + cmd := exec.CommandContext(ctx, "docker", "build", contextDir, "-t", tag) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("docker build failed: %v\n%s", err, out) + } + return tag +} + +func StartRegistry(t *testing.T) int { + t.Helper() + port := FreePort(t) + name := fmt.Sprintf("druid-e2e-registry-%d", time.Now().UnixNano()) + Run(t, "docker", "run", "-d", "--rm", "--name", name, "-p", fmt.Sprintf("127.0.0.1:%d:5000", port), "registry:2") + t.Cleanup(func() { + _ = exec.Command("docker", "rm", "-f", name).Run() + }) + client := &http.Client{Timeout: 2 * time.Second} + deadline := time.Now().Add(30 * time.Second) + for time.Now().Before(deadline) { + resp, err := client.Get(fmt.Sprintf("http://127.0.0.1:%d/v2/", port)) + if err == nil { + _ = resp.Body.Close() + if resp.StatusCode == http.StatusOK { + return port + } + } + time.Sleep(500 * time.Millisecond) + } + t.Fatalf("registry did not become ready on port %d", port) + return 0 +} + +func DockerHostAddress(t *testing.T) string { + t.Helper() + if runtime.GOOS == "darwin" || runtime.GOOS == "windows" { + return "host.docker.internal" + } + gateway := strings.TrimSpace(Run(t, "docker", "network", "inspect", "bridge", "--format", "{{(index .IPAM.Config 0).Gateway}}")) + if gateway == "" || gateway == "" { + return "host.docker.internal" + } + return gateway +} + +func UnixJSONRequest(t *testing.T, socket string, method string, path string, body string) string { + t.Helper() + transport := &http.Transport{ + DialContext: func(ctx context.Context, network string, addr string) (net.Conn, error) { + return net.Dial("unix", socket) + }, + } + client := &http.Client{Transport: transport, Timeout: 5 * time.Minute} + var reader io.Reader + if body != "" { + reader = strings.NewReader(body) + } + 
req, err := http.NewRequest(method, "http://druid"+path, reader) + if err != nil { + t.Fatal(err) + } + if body != "" { + req.Header.Set("Content-Type", "application/json") + } + resp, err := client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + data, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if resp.StatusCode >= 400 { + t.Fatalf("%s %s failed with %d: %s", method, path, resp.StatusCode, data) + } + return string(data) +} diff --git a/test/integration/kubernetes/kubernetes_cli_test.go b/test/integration/kubernetes/kubernetes_cli_test.go index 3b6b1384..0235832c 100644 --- a/test/integration/kubernetes/kubernetes_cli_test.go +++ b/test/integration/kubernetes/kubernetes_cli_test.go @@ -5,6 +5,8 @@ package kubernetes_test import ( "context" "fmt" + "io" + "net/http" "os" "os/exec" "path/filepath" @@ -20,18 +22,24 @@ func TestKubernetesBackendCLIComplexLifecycle(t *testing.T) { bins := e2e.BuildBinaries(t) port := e2e.FreePort(t) routePort := e2e.FreePort(t) + callbackPort := e2e.FreePort(t) + managementPort := e2e.FreePort(t) + registryPort := e2e.StartRegistry(t) + containerHost := e2e.DockerHostAddress(t) suffix := fmt.Sprintf("%x", time.Now().UnixNano())[:10] namespace := "druid-cli-e2e-" + suffix - pvc := "druid-e2e-" + suffix - ref := fmt.Sprintf("k8s://%s/%s", namespace, pvc) name := "k8s-cli-" + suffix fixture := e2e.WriteFixture(t, filepath.Join(t.TempDir(), "scroll"), name, port, routePort) + workerImage := e2e.BuildDockerImage(t, "druid-cli-e2e:"+name) + importImageIntoK3DIfCurrentContext(t, workerImage) + pushArtifact := fmt.Sprintf("127.0.0.1:%d/druid-e2e/%s:v1", registryPort, name) + runtimeArtifact := fmt.Sprintf("%s:%d/druid-e2e/%s:v1", containerHost, registryPort, name) + e2e.RunEnv(t, []string{"DRUID_REGISTRY_PLAIN_HTTP=true", "HOME=" + bins.Home}, bins.Druid, "push", pushArtifact, fixture.Dir) e2e.Run(t, "kubectl", "create", "namespace", namespace) t.Cleanup(func() { e2e.Run(t, "kubectl", "delete", 
"namespace", namespace, "--ignore-not-found=true", "--wait=false") }) - seedPVC(t, namespace, pvc, fixture.Dir) kubeconfig := writeCurrentKubeconfig(t) socket := filepath.Join(os.TempDir(), fmt.Sprintf("druid-k8s-%d.sock", time.Now().UnixNano())) @@ -40,25 +48,34 @@ func TestKubernetesBackendCLIComplexLifecycle(t *testing.T) { logs := e2e.StartDaemon(t, bins, "kubernetes", socket, stateDir, []string{ "--k8s-namespace", namespace, "--k8s-kubeconfig", kubeconfig, + "--k8s-pull-image", workerImage, "--hubble-relay-addr", "127.0.0.1:9", - }, nil) + "--worker-callback-listen", fmt.Sprintf(":%d", callbackPort), + "--worker-callback-url", fmt.Sprintf("http://%s:%d", containerHost, callbackPort), + "--listen", fmt.Sprintf(":%d", managementPort), + "--worker-daemon-url", fmt.Sprintf("http://%s:%d", containerHost, managementPort), + }, []string{"DRUID_REGISTRY_PLAIN_HTTP=true"}) t.Cleanup(func() { if t.Failed() { t.Logf("druid daemon logs:\n%s", logs.String()) } }) - created := e2e.RunClientJSON[e2e.RuntimeScroll](t, bins, socket, "create", "--no-start", "--scroll-root", ref, "--data-root", ref, "seeded-artifact", fixture.Name) + created := e2e.RunClientJSON[e2e.RuntimeScroll](t, bins, socket, "create", "-p", fmt.Sprintf("%d:http", fixture.RoutePort), runtimeArtifact, fixture.Name) if created.Status != "created" { t.Fatalf("created status = %s, want created", created.Status) } + rootPrefix := "k8s://" + namespace + "/" + if !strings.HasPrefix(created.Root, rootPrefix) { + t.Fatalf("created root = %s, want %s", created.Root, rootPrefix) + } + pvc := strings.TrimPrefix(created.Root, rootPrefix) targets := e2e.RunClientJSON[[]e2e.RuntimeRoutingTarget](t, bins, socket, "routing", "targets", created.ID) target := findTarget(t, targets, fixture) if target.Namespace != namespace || target.ServicePort != fixture.Port { t.Fatalf("target = %#v, want namespace %s service port %d", target, namespace, fixture.Port) } - e2e.RunClient(t, bins, socket, "routing", "apply", created.ID, 
"--file", fixture.RoutingFile) started := e2e.RunClientJSON[e2e.RuntimeScroll](t, bins, socket, "start", created.ID) if started.Status != "running" { t.Fatalf("started status = %s, want running", started.Status) @@ -76,10 +93,21 @@ func TestKubernetesBackendCLIComplexLifecycle(t *testing.T) { t.Fatalf("USER_ENV = %q, want fixture", env["USER_ENV"]) } + e2e.RunClient(t, bins, socket, "dev", created.ID, "--watch", "data", "--command", "record") + webdavTarget := findWebDAVTarget(t, e2e.RunClientJSON[[]e2e.RuntimeRoutingTarget](t, bins, socket, "routing", "targets", created.ID)) + webdavPort := e2e.FreePort(t) + waitServiceExists(t, namespace, webdavTarget.ServiceName) + webdavForward := startPortForward(t, namespace, webdavTarget.ServiceName, webdavPort, 8084) + t.Cleanup(webdavForward) + webdavURL := fmt.Sprintf("http://127.0.0.1:%d/webdav/data/dev.txt", webdavPort) + httpPut(t, webdavURL, "dev-write\n") + if got := e2e.WaitHTTP(t, webdavURL); !strings.Contains(got, "dev-write") { + t.Fatalf("webdav file = %q, want dev-write", got) + } + statuses := e2e.RunClientJSON[[]e2e.RuntimePortStatus](t, bins, socket, "ports", created.ID) assertKubernetesPort(t, statuses, fixture) - e2e.RunClient(t, bins, socket, "run", created.ID, "record") if got := readPVCFile(t, namespace, pvc, "data/finite.txt"); !strings.Contains(got, "finite-ok") { t.Fatalf("finite file = %q, want finite-ok", got) } @@ -115,44 +143,14 @@ func requireKubernetes(t *testing.T) { } } -func seedPVC(t *testing.T, namespace string, pvc string, fixtureDir string) { +func importImageIntoK3DIfCurrentContext(t *testing.T, image string) { t.Helper() - applyManifest(t, fmt.Sprintf(`apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: %s - namespace: %s -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi ---- -apiVersion: v1 -kind: Pod -metadata: - name: seed-%s - namespace: %s -spec: - restartPolicy: Never - containers: - - name: seed - image: busybox:1.36 - command: ["sh", 
"-c", "sleep 3600"] - volumeMounts: - - name: runtime - mountPath: /runtime - volumes: - - name: runtime - persistentVolumeClaim: - claimName: %s -`, pvc, namespace, pvc, namespace, pvc)) - seedPod := "seed-" + pvc - e2e.Run(t, "kubectl", "wait", "-n", namespace, "--for=condition=Ready", "pod/"+seedPod, "--timeout=180s") - e2e.Run(t, "kubectl", "cp", filepath.Join(fixtureDir, "scroll.yaml"), namespace+"/"+seedPod+":/runtime/scroll.yaml") - e2e.Run(t, "kubectl", "exec", "-n", namespace, seedPod, "--", "sh", "-c", "mkdir -p /runtime/data/public") - e2e.Run(t, "kubectl", "delete", "pod", "-n", namespace, seedPod, "--wait=true") + contextName := strings.TrimSpace(e2e.Run(t, "kubectl", "config", "current-context")) + if !strings.HasPrefix(contextName, "k3d-") { + return + } + cluster := strings.TrimPrefix(contextName, "k3d-") + e2e.Run(t, "k3d", "image", "import", image, "--cluster", cluster) } func applyManifest(t *testing.T, manifest string) { @@ -185,6 +183,17 @@ func findTarget(t *testing.T, targets []e2e.RuntimeRoutingTarget, fixture e2e.Fi return e2e.RuntimeRoutingTarget{} } +func findWebDAVTarget(t *testing.T, targets []e2e.RuntimeRoutingTarget) e2e.RuntimeRoutingTarget { + t.Helper() + for _, target := range targets { + if target.PortName == "webdav" { + return target + } + } + t.Fatalf("webdav target not found in %#v", targets) + return e2e.RuntimeRoutingTarget{} +} + func startPortForward(t *testing.T, namespace string, service string, localPort int, remotePort int) func() { t.Helper() ctx, cancel := context.WithCancel(context.Background()) @@ -312,6 +321,23 @@ func kubectlOutput(args ...string) (string, error) { return string(out), err } +func httpPut(t *testing.T, url string, body string) { + t.Helper() + req, err := http.NewRequest(http.MethodPut, url, strings.NewReader(body)) + if err != nil { + t.Fatal(err) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.StatusCode >= 400 { + data, _ 
:= io.ReadAll(resp.Body) + t.Fatalf("PUT %s failed with %d: %s", url, resp.StatusCode, data) + } +} + func waitKubernetesResourcesGone(t *testing.T, namespace string, pvc string, resource string) { t.Helper() selector := "app.kubernetes.io/managed-by=druid,druid.gg/scroll-id=" + pvc diff --git a/test/mock/services.go b/test/mock/services.go index 823fbfd0..231950e0 100644 --- a/test/mock/services.go +++ b/test/mock/services.go @@ -47,10 +47,10 @@ func (m *MockAuthorizerServiceInterface) EXPECT() *MockAuthorizerServiceInterfac } // CheckHeader mocks base method. -func (m *MockAuthorizerServiceInterface) CheckHeader(r *fiber.Ctx) (*time.Time, error) { +func (m *MockAuthorizerServiceInterface) CheckHeader(r *fiber.Ctx) (*ports.AuthContext, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CheckHeader", r) - ret0, _ := ret[0].(*time.Time) + ret0, _ := ret[0].(*ports.AuthContext) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -62,32 +62,32 @@ func (mr *MockAuthorizerServiceInterfaceMockRecorder) CheckHeader(r any) *gomock } // CheckQuery mocks base method. -func (m *MockAuthorizerServiceInterface) CheckQuery(token string) (*time.Time, error) { +func (m *MockAuthorizerServiceInterface) CheckQuery(runtimeID string, token string) (*ports.AuthContext, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CheckQuery", token) - ret0, _ := ret[0].(*time.Time) + ret := m.ctrl.Call(m, "CheckQuery", runtimeID, token) + ret0, _ := ret[0].(*ports.AuthContext) ret1, _ := ret[1].(error) return ret0, ret1 } // CheckQuery indicates an expected call of CheckQuery. 
-func (mr *MockAuthorizerServiceInterfaceMockRecorder) CheckQuery(token any) *gomock.Call { +func (mr *MockAuthorizerServiceInterfaceMockRecorder) CheckQuery(runtimeID, token any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckQuery", reflect.TypeOf((*MockAuthorizerServiceInterface)(nil).CheckQuery), token) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckQuery", reflect.TypeOf((*MockAuthorizerServiceInterface)(nil).CheckQuery), runtimeID, token) } // GenerateQueryToken mocks base method. -func (m *MockAuthorizerServiceInterface) GenerateQueryToken() string { +func (m *MockAuthorizerServiceInterface) GenerateQueryToken(runtimeID string, ownerID string) string { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GenerateQueryToken") + ret := m.ctrl.Call(m, "GenerateQueryToken", runtimeID, ownerID) ret0, _ := ret[0].(string) return ret0 } // GenerateQueryToken indicates an expected call of GenerateQueryToken. -func (mr *MockAuthorizerServiceInterfaceMockRecorder) GenerateQueryToken() *gomock.Call { +func (mr *MockAuthorizerServiceInterfaceMockRecorder) GenerateQueryToken(runtimeID, ownerID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateQueryToken", reflect.TypeOf((*MockAuthorizerServiceInterface)(nil).GenerateQueryToken)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateQueryToken", reflect.TypeOf((*MockAuthorizerServiceInterface)(nil).GenerateQueryToken), runtimeID, ownerID) } // MockScrollServiceInterface is a mock of ScrollServiceInterface interface. @@ -325,19 +325,47 @@ func (mr *MockRuntimeBackendInterfaceMockRecorder) Attach(commandName, data any) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Attach", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).Attach), commandName, data) } +// BackupRuntime mocks base method. 
+func (m *MockRuntimeBackendInterface) BackupRuntime(ctx context.Context, root, artifact string, registryCredentials []domain.RegistryCredential) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BackupRuntime", ctx, root, artifact, registryCredentials) + ret0, _ := ret[0].(error) + return ret0 +} + +// BackupRuntime indicates an expected call of BackupRuntime. +func (mr *MockRuntimeBackendInterfaceMockRecorder) BackupRuntime(ctx, root, artifact, registryCredentials any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BackupRuntime", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).BackupRuntime), ctx, root, artifact, registryCredentials) +} + +// DeleteRuntime mocks base method. +func (m *MockRuntimeBackendInterface) DeleteRuntime(root string, purgeData bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteRuntime", root, purgeData) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteRuntime indicates an expected call of DeleteRuntime. +func (mr *MockRuntimeBackendInterfaceMockRecorder) DeleteRuntime(root, purgeData any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRuntime", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).DeleteRuntime), root, purgeData) +} + // ExpectedPorts mocks base method. 
-func (m *MockRuntimeBackendInterface) ExpectedPorts(dataRoot string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimePortStatus, error) { +func (m *MockRuntimeBackendInterface) ExpectedPorts(root string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimePortStatus, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExpectedPorts", dataRoot, commands, globalPorts) + ret := m.ctrl.Call(m, "ExpectedPorts", root, commands, globalPorts) ret0, _ := ret[0].([]domain.RuntimePortStatus) ret1, _ := ret[1].(error) return ret0, ret1 } // ExpectedPorts indicates an expected call of ExpectedPorts. -func (mr *MockRuntimeBackendInterfaceMockRecorder) ExpectedPorts(dataRoot, commands, globalPorts any) *gomock.Call { +func (mr *MockRuntimeBackendInterfaceMockRecorder) ExpectedPorts(root, commands, globalPorts any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExpectedPorts", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).ExpectedPorts), dataRoot, commands, globalPorts) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExpectedPorts", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).ExpectedPorts), root, commands, globalPorts) } // Name mocks base method. @@ -354,19 +382,62 @@ func (mr *MockRuntimeBackendInterfaceMockRecorder) Name() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).Name)) } +// StartDev mocks base method. +func (m *MockRuntimeBackendInterface) StartDev(ctx context.Context, action ports.RuntimeDevAction) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StartDev", ctx, action) + ret0, _ := ret[0].(error) + return ret0 +} + +// StartDev indicates an expected call of StartDev. 
+func (mr *MockRuntimeBackendInterfaceMockRecorder) StartDev(ctx, action any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartDev", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).StartDev), ctx, action) +} + // ReadScrollFile mocks base method. -func (m *MockRuntimeBackendInterface) ReadScrollFile(scrollRoot string) ([]byte, error) { +func (m *MockRuntimeBackendInterface) ReadScrollFile(root string) ([]byte, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReadScrollFile", scrollRoot) + ret := m.ctrl.Call(m, "ReadScrollFile", root) ret0, _ := ret[0].([]byte) ret1, _ := ret[1].(error) return ret0, ret1 } // ReadScrollFile indicates an expected call of ReadScrollFile. -func (mr *MockRuntimeBackendInterfaceMockRecorder) ReadScrollFile(scrollRoot any) *gomock.Call { +func (mr *MockRuntimeBackendInterfaceMockRecorder) ReadScrollFile(root any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadScrollFile", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).ReadScrollFile), scrollRoot) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadScrollFile", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).ReadScrollFile), root) +} + +// RestoreRuntime mocks base method. +func (m *MockRuntimeBackendInterface) RestoreRuntime(ctx context.Context, root, artifact string, registryCredentials []domain.RegistryCredential) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RestoreRuntime", ctx, root, artifact, registryCredentials) + ret0, _ := ret[0].(error) + return ret0 +} + +// RestoreRuntime indicates an expected call of RestoreRuntime. 
+func (mr *MockRuntimeBackendInterfaceMockRecorder) RestoreRuntime(ctx, root, artifact, registryCredentials any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreRuntime", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).RestoreRuntime), ctx, root, artifact, registryCredentials) +} + +// RoutingTargets mocks base method. +func (m *MockRuntimeBackendInterface) RoutingTargets(root string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimeRoutingTarget, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RoutingTargets", root, commands, globalPorts) + ret0, _ := ret[0].([]domain.RuntimeRoutingTarget) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RoutingTargets indicates an expected call of RoutingTargets. +func (mr *MockRuntimeBackendInterfaceMockRecorder) RoutingTargets(root, commands, globalPorts any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RoutingTargets", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).RoutingTargets), root, commands, globalPorts) } // RunCommand mocks base method. @@ -385,56 +456,59 @@ func (mr *MockRuntimeBackendInterfaceMockRecorder) RunCommand(command any) *gomo } // Signal mocks base method. -func (m *MockRuntimeBackendInterface) Signal(commandName, target, signal, dataRoot string) error { +func (m *MockRuntimeBackendInterface) Signal(commandName, target, signal, root string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Signal", commandName, target, signal, dataRoot) + ret := m.ctrl.Call(m, "Signal", commandName, target, signal, root) ret0, _ := ret[0].(error) return ret0 } // Signal indicates an expected call of Signal. 
-func (mr *MockRuntimeBackendInterfaceMockRecorder) Signal(commandName, target, signal, dataRoot any) *gomock.Call { +func (mr *MockRuntimeBackendInterfaceMockRecorder) Signal(commandName, target, signal, root any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Signal", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).Signal), commandName, target, signal, dataRoot) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Signal", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).Signal), commandName, target, signal, root) } -// MockRuntimeMaterializerInterface is a mock of RuntimeMaterializerInterface interface. -type MockRuntimeMaterializerInterface struct { - ctrl *gomock.Controller - recorder *MockRuntimeMaterializerInterfaceMockRecorder - isgomock struct{} +// SpawnPullWorker mocks base method. +func (m *MockRuntimeBackendInterface) SpawnPullWorker(ctx context.Context, action ports.RuntimeWorkerAction) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SpawnPullWorker", ctx, action) + ret0, _ := ret[0].(error) + return ret0 } -// MockRuntimeMaterializerInterfaceMockRecorder is the mock recorder for MockRuntimeMaterializerInterface. -type MockRuntimeMaterializerInterfaceMockRecorder struct { - mock *MockRuntimeMaterializerInterface +// SpawnPullWorker indicates an expected call of SpawnPullWorker. +func (mr *MockRuntimeBackendInterfaceMockRecorder) SpawnPullWorker(ctx, action any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SpawnPullWorker", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).SpawnPullWorker), ctx, action) } -// NewMockRuntimeMaterializerInterface creates a new mock instance. 
-func NewMockRuntimeMaterializerInterface(ctrl *gomock.Controller) *MockRuntimeMaterializerInterface { - mock := &MockRuntimeMaterializerInterface{ctrl: ctrl} - mock.recorder = &MockRuntimeMaterializerInterfaceMockRecorder{mock} - return mock +// StopRuntime mocks base method. +func (m *MockRuntimeBackendInterface) StopRuntime(root string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StopRuntime", root) + ret0, _ := ret[0].(error) + return ret0 } -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockRuntimeMaterializerInterface) EXPECT() *MockRuntimeMaterializerInterfaceMockRecorder { - return m.recorder +// StopRuntime indicates an expected call of StopRuntime. +func (mr *MockRuntimeBackendInterfaceMockRecorder) StopRuntime(root any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopRuntime", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).StopRuntime), root) } -// MaterializeScroll mocks base method. -func (m *MockRuntimeMaterializerInterface) MaterializeScroll(ctx context.Context, artifact, requestedName string) (*ports.RuntimeMaterialization, error) { +// StopDev mocks base method. +func (m *MockRuntimeBackendInterface) StopDev(ctx context.Context, root string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "MaterializeScroll", ctx, artifact, requestedName) - ret0, _ := ret[0].(*ports.RuntimeMaterialization) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "StopDev", ctx, root) + ret0, _ := ret[0].(error) + return ret0 } -// MaterializeScroll indicates an expected call of MaterializeScroll. -func (mr *MockRuntimeMaterializerInterfaceMockRecorder) MaterializeScroll(ctx, artifact, requestedName any) *gomock.Call { +// StopDev indicates an expected call of StopDev. 
+func (mr *MockRuntimeBackendInterfaceMockRecorder) StopDev(ctx, root any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MaterializeScroll", reflect.TypeOf((*MockRuntimeMaterializerInterface)(nil).MaterializeScroll), ctx, artifact, requestedName) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopDev", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).StopDev), ctx, root) } // MockBroadcastChannelInterface is a mock of BroadcastChannelInterface interface. @@ -593,6 +667,21 @@ func (mr *MockOciRegistryInterfaceMockRecorder) CanUpdateTag(descriptor, folder, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CanUpdateTag", reflect.TypeOf((*MockOciRegistryInterface)(nil).CanUpdateTag), descriptor, folder, tag) } +// FetchFile mocks base method. +func (m *MockOciRegistryInterface) FetchFile(artifact, filePath string) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchFile", artifact, filePath) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchFile indicates an expected call of FetchFile. +func (mr *MockOciRegistryInterfaceMockRecorder) FetchFile(artifact, filePath any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchFile", reflect.TypeOf((*MockOciRegistryInterface)(nil).FetchFile), artifact, filePath) +} + // GetRepo mocks base method. func (m *MockOciRegistryInterface) GetRepo(repoUrl string) (*remote.Repository, error) { m.ctrl.T.Helper() @@ -651,40 +740,19 @@ func (mr *MockOciRegistryInterfaceMockRecorder) Push(folder, repo, tag, override return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Push", reflect.TypeOf((*MockOciRegistryInterface)(nil).Push), folder, repo, tag, overrides, packMeta, scrollFile) } -// MockCronManagerInterface is a mock of CronManagerInterface interface. 
-type MockCronManagerInterface struct { - ctrl *gomock.Controller - recorder *MockCronManagerInterfaceMockRecorder - isgomock struct{} -} - -// MockCronManagerInterfaceMockRecorder is the mock recorder for MockCronManagerInterface. -type MockCronManagerInterfaceMockRecorder struct { - mock *MockCronManagerInterface -} - -// NewMockCronManagerInterface creates a new mock instance. -func NewMockCronManagerInterface(ctrl *gomock.Controller) *MockCronManagerInterface { - mock := &MockCronManagerInterface{ctrl: ctrl} - mock.recorder = &MockCronManagerInterfaceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockCronManagerInterface) EXPECT() *MockCronManagerInterfaceMockRecorder { - return m.recorder -} - -// Init mocks base method. -func (m *MockCronManagerInterface) Init() { +// ResolveDigest mocks base method. +func (m *MockOciRegistryInterface) ResolveDigest(artifact string) (string, error) { m.ctrl.T.Helper() - m.ctrl.Call(m, "Init") + ret := m.ctrl.Call(m, "ResolveDigest", artifact) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 } -// Init indicates an expected call of Init. -func (mr *MockCronManagerInterfaceMockRecorder) Init() *gomock.Call { +// ResolveDigest indicates an expected call of ResolveDigest. +func (mr *MockOciRegistryInterfaceMockRecorder) ResolveDigest(artifact any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockCronManagerInterface)(nil).Init)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResolveDigest", reflect.TypeOf((*MockOciRegistryInterface)(nil).ResolveDigest), artifact) } // MockQueueManagerInterface is a mock of QueueManagerInterface interface. @@ -711,34 +779,6 @@ func (m *MockQueueManagerInterface) EXPECT() *MockQueueManagerInterfaceMockRecor return m.recorder } -// AddAndRememberItem mocks base method. 
-func (m *MockQueueManagerInterface) AddAndRememberItem(cmd string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddAndRememberItem", cmd) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddAndRememberItem indicates an expected call of AddAndRememberItem. -func (mr *MockQueueManagerInterfaceMockRecorder) AddAndRememberItem(cmd any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddAndRememberItem", reflect.TypeOf((*MockQueueManagerInterface)(nil).AddAndRememberItem), cmd) -} - -// AddShutdownItem mocks base method. -func (m *MockQueueManagerInterface) AddShutdownItem(cmd string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddShutdownItem", cmd) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddShutdownItem indicates an expected call of AddShutdownItem. -func (mr *MockQueueManagerInterfaceMockRecorder) AddShutdownItem(cmd any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddShutdownItem", reflect.TypeOf((*MockQueueManagerInterface)(nil).AddShutdownItem), cmd) -} - // AddTempItem mocks base method. func (m *MockQueueManagerInterface) AddTempItem(cmd string) error { m.ctrl.T.Helper() @@ -970,18 +1010,6 @@ func (mr *MockColdStarterInterfaceMockRecorder) Stop() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockColdStarterInterface)(nil).Stop)) } -// StopWithDeplay mocks base method. -func (m *MockColdStarterInterface) StopWithDeplay(arg0 uint) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "StopWithDeplay", arg0) -} - -// StopWithDeplay indicates an expected call of StopWithDeplay. 
-func (mr *MockColdStarterInterfaceMockRecorder) StopWithDeplay(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopWithDeplay", reflect.TypeOf((*MockColdStarterInterface)(nil).StopWithDeplay), arg0) -} - // MockColdStarterServerInterface is a mock of ColdStarterServerInterface interface. type MockColdStarterServerInterface struct { ctrl *gomock.Controller @@ -1186,6 +1214,18 @@ func (mr *MockWatchServiceInterfaceMockRecorder) Subscribe() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Subscribe", reflect.TypeOf((*MockWatchServiceInterface)(nil).Subscribe)) } +// Trigger mocks base method. +func (m *MockWatchServiceInterface) Trigger() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Trigger") +} + +// Trigger indicates an expected call of Trigger. +func (mr *MockWatchServiceInterfaceMockRecorder) Trigger() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trigger", reflect.TypeOf((*MockWatchServiceInterface)(nil).Trigger)) +} + // Unsubscribe mocks base method. func (m *MockWatchServiceInterface) Unsubscribe(client chan *[]byte) { m.ctrl.T.Helper() From 30128b901c57a8f5105b48d2290b47fe19d3161b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc=20Schottst=C3=A4dt?= Date: Sat, 16 May 2026 00:27:03 +0200 Subject: [PATCH 5/6] feat: add namespace support for runtime resources - Introduced a new `namespace` field in the OpenAPI specification for runtime resources, applicable to Kubernetes backends. - Updated the runtime supervisor and related services to handle the new namespace parameter during scroll creation and management. - Enhanced error handling and logging in the runtime daemon for better observability. - Refactored related tests to accommodate the namespace changes and ensure proper functionality. 
--- api/openapi.yaml | 6 + apps/druid-coldstarter/adapters/cli/root.go | 6 +- .../adapters/cli/root_test.go | 15 +- .../adapters/filesystem/status_writer.go | 46 -- .../core/ports/status_writer.go | 7 - .../core/services/coldstarter.go | 105 ++- .../core/services/coldstarter_test.go | 114 ++- apps/druid/adapters/cli/client/dev.go | 3 + apps/druid/adapters/cli/daemon.go | 9 +- .../adapters/http/handlers/middleware.go | 46 ++ .../adapters/http/handlers/scroll_handler.go | 16 +- apps/druid/core/services/runtime_lifecycle.go | 17 +- .../core/services/runtime_materialization.go | 4 +- apps/druid/core/services/runtime_session.go | 4 +- .../druid/core/services/runtime_supervisor.go | 110 ++- .../core/services/runtime_supervisor_test.go | 118 ++- examples/container-lab/scroll.yaml | 2 - examples/minecraft/scroll.yaml | 4 +- examples/mysql/scroll.yaml | 1 - examples/static-web/scroll.yaml | 1 - internal/api/generated.go | 86 +- internal/core/domain/runtime_scroll.go | 10 +- internal/core/domain/scroll.go | 26 +- internal/core/ports/services_ports.go | 11 + internal/core/services/coldstarter.go | 22 +- .../coldstarter/handler/lua_handler.go | 21 +- .../core/services/runtime_scroll_manager.go | 10 +- .../services/runtime_scroll_manager_test.go | 67 +- internal/runtime/backend.go | 21 +- internal/runtime/backend_factory_test.go | 12 +- internal/runtime/docker/backend.go | 50 +- internal/runtime/docker/backend_names_test.go | 44 + .../docker/state_store.go} | 52 +- .../docker/state_store_test.go} | 21 +- internal/runtime/kubernetes/backend.go | 750 +++++++++++++++--- internal/runtime/kubernetes/names.go | 27 +- internal/runtime/kubernetes/names_test.go | 22 + internal/runtime/kubernetes/resources.go | 22 +- internal/runtime/kubernetes/resources_test.go | 115 ++- internal/runtime/kubernetes/state_store.go | 11 +- .../runtime/kubernetes/state_store_test.go | 17 +- test/integration/docker/docker_cli_test.go | 9 +- test/integration/internal/e2e/harness.go | 1 - 
test/mock/services.go | 182 ++++- 44 files changed, 1725 insertions(+), 518 deletions(-) delete mode 100644 apps/druid-coldstarter/adapters/filesystem/status_writer.go delete mode 100644 apps/druid-coldstarter/core/ports/status_writer.go create mode 100644 apps/druid/adapters/http/handlers/middleware.go create mode 100644 internal/runtime/docker/backend_names_test.go rename internal/{core/services/runtime_state_store.go => runtime/docker/state_store.go} (86%) rename internal/{core/services/runtime_state_store_test.go => runtime/docker/state_store_test.go} (73%) create mode 100644 internal/runtime/kubernetes/names_test.go diff --git a/api/openapi.yaml b/api/openapi.yaml index c30800b7..230155bd 100644 --- a/api/openapi.yaml +++ b/api/openapi.yaml @@ -89,6 +89,9 @@ components: owner_id: type: string description: Runtime owner id used for customer-facing route authorization. + namespace: + type: string + description: Kubernetes namespace for runtime resources. Ignored by non-Kubernetes backends. registry_credentials: type: array items: @@ -110,6 +113,9 @@ components: owner_id: type: string description: Runtime owner id used for customer-facing route authorization. + namespace: + type: string + description: Kubernetes namespace for runtime resources. Ignored by non-Kubernetes backends. 
registry_credentials: type: array items: diff --git a/apps/druid-coldstarter/adapters/cli/root.go b/apps/druid-coldstarter/adapters/cli/root.go index 25060749..1319e95b 100644 --- a/apps/druid-coldstarter/adapters/cli/root.go +++ b/apps/druid-coldstarter/adapters/cli/root.go @@ -7,14 +7,12 @@ import ( "os/signal" "syscall" - "github.com/highcard-dev/daemon/apps/druid-coldstarter/adapters/filesystem" "github.com/highcard-dev/daemon/apps/druid-coldstarter/core/services" "github.com/spf13/cobra" ) const ( - rootEnv = "DRUID_ROOT" - statusFileEnv = "DRUID_COLDSTARTER_STATUS_FILE" + rootEnv = "DRUID_ROOT" ) func NewRootCommand() *cobra.Command { @@ -28,7 +26,7 @@ func NewRootCommand() *cobra.Command { } ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM, syscall.SIGINT) defer stop() - return services.NewColdstarterService(filesystem.NewStatusWriter()).Run(ctx, root, os.Getenv(statusFileEnv)) + return services.NewColdstarterService().Run(ctx, root) }, } cmd.SilenceUsage = true diff --git a/apps/druid-coldstarter/adapters/cli/root_test.go b/apps/druid-coldstarter/adapters/cli/root_test.go index 46dbe2b0..c8c06e8a 100644 --- a/apps/druid-coldstarter/adapters/cli/root_test.go +++ b/apps/druid-coldstarter/adapters/cli/root_test.go @@ -7,17 +7,10 @@ import ( func TestRootCommandHasNoRuntimeFlags(t *testing.T) { cmd := NewRootCommand() - if cmd.Flags().Lookup("root") != nil { - t.Fatal("did not expect root flag") - } - if cmd.Flags().Lookup("status-file") != nil { - t.Fatal("did not expect status-file flag") - } - if cmd.Flags().Lookup("scroll-root") != nil { - t.Fatal("did not expect scroll-root flag") - } - if cmd.Flags().Lookup("runtime-config") != nil { - t.Fatal("did not expect runtime-config flag") + for _, flag := range []string{"root", "scroll-root", "runtime-config", "status" + "-" + "file"} { + if cmd.Flags().Lookup(flag) != nil { + t.Fatalf("did not expect %s flag", flag) + } } } diff --git 
a/apps/druid-coldstarter/adapters/filesystem/status_writer.go b/apps/druid-coldstarter/adapters/filesystem/status_writer.go deleted file mode 100644 index 8c4ec9b2..00000000 --- a/apps/druid-coldstarter/adapters/filesystem/status_writer.go +++ /dev/null @@ -1,46 +0,0 @@ -package filesystem - -import ( - "encoding/json" - "os" - "path/filepath" - "time" - - "github.com/highcard-dev/daemon/internal/core/domain" -) - -type StatusWriter struct{} - -type status struct { - FinishedAt time.Time `json:"finished_at"` - PortName string `json:"port_name,omitempty"` - Port int `json:"port,omitempty"` - Protocol string `json:"protocol,omitempty"` -} - -func NewStatusWriter() *StatusWriter { - return &StatusWriter{} -} - -func (w *StatusWriter) Write(root string, statusFile string, port *domain.AugmentedPort) error { - path := statusFile - if !filepath.IsAbs(path) { - path = filepath.Join(root, statusFile) - } - - data := status{FinishedAt: time.Now().UTC()} - if port != nil { - data.PortName = port.Name - data.Port = port.Port.Port - data.Protocol = port.Protocol - } - - encoded, err := json.MarshalIndent(data, "", " ") - if err != nil { - return err - } - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - return os.WriteFile(path, append(encoded, '\n'), 0644) -} diff --git a/apps/druid-coldstarter/core/ports/status_writer.go b/apps/druid-coldstarter/core/ports/status_writer.go deleted file mode 100644 index 5733a925..00000000 --- a/apps/druid-coldstarter/core/ports/status_writer.go +++ /dev/null @@ -1,7 +0,0 @@ -package ports - -import "github.com/highcard-dev/daemon/internal/core/domain" - -type StatusWriter interface { - Write(root string, statusFile string, port *domain.AugmentedPort) error -} diff --git a/apps/druid-coldstarter/core/services/coldstarter.go b/apps/druid-coldstarter/core/services/coldstarter.go index 92f3dc0f..c06715e0 100644 --- a/apps/druid-coldstarter/core/services/coldstarter.go +++ 
b/apps/druid-coldstarter/core/services/coldstarter.go @@ -3,50 +3,111 @@ package services import ( "context" "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "time" - "github.com/highcard-dev/daemon/apps/druid-coldstarter/core/ports" + "github.com/highcard-dev/daemon/internal/core/domain" "github.com/highcard-dev/daemon/internal/core/services" "github.com/highcard-dev/daemon/internal/utils/logger" "go.uber.org/zap" ) -type ColdstarterService struct { - statusWriter ports.StatusWriter +type ColdstarterService struct{} + +type envPortService struct { + ports []*domain.AugmentedPort } -func NewColdstarterService(statusWriter ports.StatusWriter) *ColdstarterService { - return &ColdstarterService{statusWriter: statusWriter} +func NewColdstarterService() *ColdstarterService { + return &ColdstarterService{} } -func (s *ColdstarterService) Run(ctx context.Context, root string, statusFile string) error { - scrollService, err := services.NewScrollService(root) +func (s *ColdstarterService) Run(ctx context.Context, root string) error { + portService, err := portServiceFromEnv(root) if err != nil { - return fmt.Errorf("failed to load scroll: %w", err) - } - - currentScroll := scrollService.GetCurrent() - if len(currentScroll.Ports) == 0 { - return fmt.Errorf("no ports found in scroll") + return err } - logger.Log().Info("Coldstart scroll loaded", zap.String("name", currentScroll.Name), zap.Any("version", currentScroll.Version), zap.Any("ports", currentScroll.Ports)) + logger.Log().Info("Coldstart ports loaded", zap.Any("ports", portService.GetPorts())) - portService := services.NewPortServiceWithScrollFile(¤tScroll.File) - coldStarter := services.NewColdStarter(portService, nil, scrollService.GetDir()) + coldStarter := services.NewColdStarter(portService, nil, root) finish := coldStarter.Start(ctx) select { case <-ctx.Done(): coldStarter.Stop() return ctx.Err() - case port := <-finish: + case <-finish: coldStarter.Stop() - if statusFile != "" && s.statusWriter != nil { 
- if err := s.statusWriter.Write(root, statusFile, port); err != nil { - return err - } - } logger.Log().Info("Coldstarter finished") return nil } } + +func (s *envPortService) GetPorts() []*domain.AugmentedPort { + return s.ports +} + +func portServiceFromEnv(root string) (*envPortService, error) { + ports := []*domain.AugmentedPort{} + vars := map[string]string{} + for _, entry := range os.Environ() { + key, value, ok := strings.Cut(entry, "=") + if !ok { + continue + } + if name, ok := strings.CutPrefix(key, "DRUID_COLDSTARTER_VAR_"); ok { + if name != strings.ToUpper(name) { + return nil, fmt.Errorf("%s must be uppercase", key) + } + vars[name] = value + continue + } + if !strings.HasPrefix(key, "DRUID_PORT_") || !strings.HasSuffix(key, "_COLDSTARTER") { + continue + } + if key != strings.ToUpper(key) { + return nil, fmt.Errorf("%s must be uppercase", key) + } + handler := value + suffix := strings.TrimSuffix(strings.TrimPrefix(key, "DRUID_PORT_"), "_COLDSTARTER") + portValue := os.Getenv("DRUID_PORT_" + suffix) + if portValue == "" { + return nil, fmt.Errorf("DRUID_PORT_%s is required when %s is set", suffix, key) + } + if handler == "" { + return nil, fmt.Errorf("%s must not be empty", key) + } + if handler != "generic" { + path := filepath.Join(root, filepath.Clean(handler)) + if rel, err := filepath.Rel(root, path); err != nil || rel == ".." 
|| filepath.IsAbs(rel) || strings.HasPrefix(rel, "../") { + return nil, fmt.Errorf("%s must be generic or a path below DRUID_ROOT", key) + } + } + port, err := strconv.Atoi(portValue) + if err != nil { + return nil, fmt.Errorf("DRUID_PORT_%s must be a port number: %w", suffix, err) + } + protocol := strings.ToLower(os.Getenv("DRUID_PORT_" + suffix + "_PROTOCOL")) + if protocol == "" { + protocol = "tcp" + } + ports = append(ports, &domain.AugmentedPort{ + Port: domain.Port{ + Name: strings.ToLower(suffix), + Port: port, + Protocol: protocol, + }, + ColdstarterHandler: handler, + ColdstarterVars: vars, + InactiveSince: time.Now(), + }) + } + if len(ports) == 0 { + return nil, fmt.Errorf("no coldstarter ports configured") + } + return &envPortService{ports: ports}, nil +} diff --git a/apps/druid-coldstarter/core/services/coldstarter_test.go b/apps/druid-coldstarter/core/services/coldstarter_test.go index b6dc7c0d..a667cda6 100644 --- a/apps/druid-coldstarter/core/services/coldstarter_test.go +++ b/apps/druid-coldstarter/core/services/coldstarter_test.go @@ -6,33 +6,23 @@ import ( "os" "path/filepath" "strconv" + "strings" "testing" "time" - - "github.com/highcard-dev/daemon/apps/druid-coldstarter/adapters/filesystem" ) -func TestColdstarterRunServesGenericPortAndWritesStatus(t *testing.T) { +func TestColdstarterRunServesGenericPortFromEnv(t *testing.T) { root := t.TempDir() port := freeTCPPort(t) - scroll := []byte(`name: test/coldstarter -version: 0.1.0 -ports: - - name: main - protocol: tcp - port: ` + port + ` - sleep_handler: generic -commands: {} -`) - if err := os.WriteFile(filepath.Join(root, "scroll.yaml"), scroll, 0644); err != nil { - t.Fatal(err) - } + t.Setenv("DRUID_PORT_MAIN", port) + t.Setenv("DRUID_PORT_MAIN_PROTOCOL", "tcp") + t.Setenv("DRUID_PORT_MAIN_COLDSTARTER", "generic") ctx, cancel := context.WithCancel(context.Background()) defer cancel() errCh := make(chan error, 1) go func() { - errCh <- 
NewColdstarterService(filesystem.NewStatusWriter()).Run(ctx, root, ".coldstarter.json") + errCh <- NewColdstarterService().Run(ctx, root) }() conn := dialTCP(t, "127.0.0.1:"+port) @@ -47,8 +37,96 @@ commands: {} case <-time.After(3 * time.Second): t.Fatal("coldstarter did not finish") } - if _, err := os.Stat(filepath.Join(root, ".coldstarter.json")); err != nil { - t.Fatalf("status file missing: %v", err) + if _, err := os.Stat(filepath.Join(root, ".coldstarter.json")); !os.IsNotExist(err) { + t.Fatalf("status file exists or stat failed: %v", err) + } +} + +func TestColdstarterRunExitsFromSecondaryGenericPort(t *testing.T) { + root := t.TempDir() + mainPort := freeTCPPort(t) + rconPort := freeTCPPort(t) + t.Setenv("DRUID_PORT_MAIN", mainPort) + t.Setenv("DRUID_PORT_MAIN_PROTOCOL", "tcp") + t.Setenv("DRUID_PORT_MAIN_COLDSTARTER", "generic") + t.Setenv("DRUID_PORT_RCON", rconPort) + t.Setenv("DRUID_PORT_RCON_PROTOCOL", "tcp") + t.Setenv("DRUID_PORT_RCON_COLDSTARTER", "generic") + + errCh := make(chan error, 1) + go func() { + errCh <- NewColdstarterService().Run(context.Background(), root) + }() + + conn := dialTCP(t, "127.0.0.1:"+rconPort) + _, _ = conn.Write([]byte("wake")) + _ = conn.Close() + + select { + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + case <-time.After(3 * time.Second): + t.Fatal("coldstarter did not finish") + } +} + +func TestColdstarterRejectsMissingPortEnv(t *testing.T) { + t.Setenv("DRUID_PORT_MAIN_COLDSTARTER", "generic") + + err := NewColdstarterService().Run(context.Background(), t.TempDir()) + if err == nil || !strings.Contains(err.Error(), "DRUID_PORT_MAIN is required") { + t.Fatalf("err = %v", err) + } +} + +func TestColdstarterRejectsPathTraversalHandler(t *testing.T) { + t.Setenv("DRUID_PORT_MAIN", freeTCPPort(t)) + t.Setenv("DRUID_PORT_MAIN_COLDSTARTER", "../minecraft.lua") + + err := NewColdstarterService().Run(context.Background(), t.TempDir()) + if err == nil || !strings.Contains(err.Error(), "path below 
DRUID_ROOT") { + t.Fatalf("err = %v", err) + } +} + +func TestColdstarterRequiresConfiguredPorts(t *testing.T) { + err := NewColdstarterService().Run(context.Background(), t.TempDir()) + if err == nil || !strings.Contains(err.Error(), "no coldstarter ports configured") { + t.Fatalf("err = %v", err) + } +} + +func TestColdstarterAcceptsRelativeLuaHandler(t *testing.T) { + root := t.TempDir() + if err := os.MkdirAll(filepath.Join(root, "packet_handler"), 0755); err != nil { + t.Fatal(err) + } + t.Setenv("DRUID_PORT_MAIN", freeTCPPort(t)) + t.Setenv("DRUID_PORT_MAIN_COLDSTARTER", "packet_handler/minecraft.lua") + t.Setenv("DRUID_COLDSTARTER_VAR_SERVER_LIST_NAME", "Druid idle") + + service, err := portServiceFromEnv(root) + if err != nil { + t.Fatal(err) + } + if got := service.GetPorts()[0].ColdstarterHandler; got != "packet_handler/minecraft.lua" { + t.Fatalf("handler = %q", got) + } + if got := service.GetPorts()[0].ColdstarterVars["SERVER_LIST_NAME"]; got != "Druid idle" { + t.Fatalf("lua var = %q", got) + } +} + +func TestColdstarterRejectsMixedCaseEnvNames(t *testing.T) { + t.Setenv("DRUID_PORT_MAIN", freeTCPPort(t)) + t.Setenv("DRUID_PORT_MAIN_COLDSTARTER", "generic") + t.Setenv("DRUID_COLDSTARTER_VAR_"+"ServerListName", "Druid idle") + + err := NewColdstarterService().Run(context.Background(), t.TempDir()) + if err == nil || !strings.Contains(err.Error(), "must be uppercase") { + t.Fatalf("err = %v", err) } } diff --git a/apps/druid/adapters/cli/client/dev.go b/apps/druid/adapters/cli/client/dev.go index d7a15c38..33326857 100644 --- a/apps/druid/adapters/cli/client/dev.go +++ b/apps/druid/adapters/cli/client/dev.go @@ -15,6 +15,7 @@ import ( "github.com/gofiber/contrib/websocket" "github.com/gofiber/fiber/v2" "github.com/gofiber/fiber/v2/middleware/adaptor" + runtimehandlers "github.com/highcard-dev/daemon/apps/druid/adapters/http/handlers" "github.com/highcard-dev/daemon/internal/api" "github.com/highcard-dev/daemon/internal/core/domain" 
"github.com/highcard-dev/daemon/internal/core/ports" @@ -174,7 +175,9 @@ func newDevApp(root string, broadcast *domain.BroadcastChannel, queue *devTrigge app := fiber.New(fiber.Config{ DisableStartupMessage: true, RequestMethods: append(fiber.DefaultMethods, "PROPFIND", "MKCOL", "MOVE", "COPY"), + ErrorHandler: runtimehandlers.ErrorHandler, }) + app.Use(runtimehandlers.RequestLogger) server := devServer{root: root, broadcast: broadcast, queue: queue, auth: auth} app.Use(func(c *fiber.Ctx) error { c.Set("Access-Control-Allow-Origin", "*") diff --git a/apps/druid/adapters/cli/daemon.go b/apps/druid/adapters/cli/daemon.go index fc7b6936..fc75a250 100644 --- a/apps/druid/adapters/cli/daemon.go +++ b/apps/druid/adapters/cli/daemon.go @@ -131,7 +131,8 @@ func runRuntimeDaemon() error { Websocket: websocketHandler, } - managementApp := fiber.New(fiber.Config{DisableStartupMessage: true}) + managementApp := fiber.New(fiber.Config{DisableStartupMessage: true, ErrorHandler: runtimehandlers.ErrorHandler}) + managementApp.Use(runtimehandlers.RequestLogger) if runtimeInternalToken != "" { managementApp.Use(func(c *fiber.Ctx) error { path := c.Path() @@ -152,7 +153,8 @@ func runRuntimeDaemon() error { var publicApp *fiber.App if runtimePublicListen != "" { - publicApp = fiber.New(fiber.Config{DisableStartupMessage: true}) + publicApp = fiber.New(fiber.Config{DisableStartupMessage: true, ErrorHandler: runtimehandlers.ErrorHandler}) + publicApp.Use(runtimehandlers.RequestLogger) runtimehandlers.RegisterPublicRoutes(publicApp, handlers) } var callbackApp *fiber.App @@ -160,7 +162,8 @@ func runRuntimeDaemon() error { runtimeWorkerCallbackListen = os.Getenv("DRUID_WORKER_CALLBACK_LISTEN") } if runtimeWorkerCallbackListen != "" { - callbackApp = fiber.New(fiber.Config{DisableStartupMessage: true}) + callbackApp = fiber.New(fiber.Config{DisableStartupMessage: true, ErrorHandler: runtimehandlers.ErrorHandler}) + callbackApp.Use(runtimehandlers.RequestLogger) 
callbackapi.RegisterHandlers(callbackApp, runtimeCallbackHandler{callbacks: callbacks}) } return listenRuntimeHTTP(managementApp, publicApp, callbackApp, runtime.Store.StateDir()) diff --git a/apps/druid/adapters/http/handlers/middleware.go b/apps/druid/adapters/http/handlers/middleware.go new file mode 100644 index 00000000..a4961d87 --- /dev/null +++ b/apps/druid/adapters/http/handlers/middleware.go @@ -0,0 +1,46 @@ +package handlers + +import ( + "time" + + "github.com/gofiber/fiber/v2" + "github.com/highcard-dev/daemon/internal/utils/logger" + "go.uber.org/zap" +) + +func ErrorHandler(c *fiber.Ctx, err error) error { + status := fiber.StatusInternalServerError + if fiberErr, ok := err.(*fiber.Error); ok { + status = fiberErr.Code + } + if status >= fiber.StatusInternalServerError { + logger.Log().Error("HTTP request failed", + zap.String("method", c.Method()), + zap.String("path", c.Path()), + zap.Int("status", status), + zap.Error(err), + ) + } + return fiber.DefaultErrorHandler(c, err) +} + +func RequestLogger(c *fiber.Ctx) error { + start := time.Now() + err := c.Next() + status := c.Response().StatusCode() + if err != nil { + status = fiber.StatusInternalServerError + if fiberErr, ok := err.(*fiber.Error); ok { + status = fiberErr.Code + } + } + logger.Log().Debug("HTTP request", + zap.String("method", c.Method()), + zap.String("path", c.Path()), + zap.Int("status", status), + zap.Duration("duration", time.Since(start)), + zap.String("ip", c.IP()), + zap.Error(err), + ) + return err +} diff --git a/apps/druid/adapters/http/handlers/scroll_handler.go b/apps/druid/adapters/http/handlers/scroll_handler.go index c9406cc4..11f7d81e 100644 --- a/apps/druid/adapters/http/handlers/scroll_handler.go +++ b/apps/druid/adapters/http/handlers/scroll_handler.go @@ -70,9 +70,13 @@ func (h *ScrollHandler) CreateScroll(c *fiber.Ctx) error { if request.OwnerId != nil { ownerID = *request.OwnerId } - runtimeScroll, err := h.supervisor.CreateWithOwner(request.Artifact, name, 
ownerID, registryCredentials(request.RegistryCredentials)) + namespace := "" + if request.Namespace != nil { + namespace = *request.Namespace + } + runtimeScroll, err := h.supervisor.CreateWithOwner(request.Artifact, name, ownerID, namespace, registryCredentials(request.RegistryCredentials)) if err != nil { - if errors.Is(err, services.ErrScrollAlreadyExists) { + if errors.Is(err, domain.ErrRuntimeScrollAlreadyExists) { return fiber.NewError(fiber.StatusConflict, err.Error()) } if errors.Is(err, appservices.ErrRuntimeMaterializationUnsupported) { @@ -98,7 +102,11 @@ func (h *ScrollHandler) EnsureScroll(c *fiber.Ctx) error { if request.OwnerId != nil { ownerID = *request.OwnerId } - runtimeScroll, err := h.supervisor.EnsureWithOwner(request.Artifact, name, ownerID, registryCredentials(request.RegistryCredentials)) + namespace := "" + if request.Namespace != nil { + namespace = *request.Namespace + } + runtimeScroll, err := h.supervisor.EnsureWithOwner(request.Artifact, name, ownerID, namespace, registryCredentials(request.RegistryCredentials)) if err != nil { if errors.Is(err, appservices.ErrRuntimeMaterializationUnsupported) { return fiber.NewError(fiber.StatusNotImplemented, err.Error()) @@ -352,7 +360,7 @@ func (h *ScrollHandler) RestoreScroll(c *fiber.Ctx, id string) error { func (h *ScrollHandler) getScroll(id string) (*domain.RuntimeScroll, error) { runtimeScroll, err := h.supervisor.Get(id) - if errors.Is(err, services.ErrScrollNotFound) { + if errors.Is(err, domain.ErrRuntimeScrollNotFound) { return nil, fiber.NewError(fiber.StatusNotFound, err.Error()) } return runtimeScroll, err diff --git a/apps/druid/core/services/runtime_lifecycle.go b/apps/druid/core/services/runtime_lifecycle.go index 2d212dca..433035d3 100644 --- a/apps/druid/core/services/runtime_lifecycle.go +++ b/apps/druid/core/services/runtime_lifecycle.go @@ -7,14 +7,23 @@ func (s *RuntimeSupervisor) Delete(id string) error { } func (s *RuntimeSupervisor) DeleteWithPolicy(id string, purgeData 
bool) error { - session, err := s.detachSession(id) + s.mu.Lock() + session := s.sessions[id] + delete(s.sessions, id) + s.mu.Unlock() + if session != nil { + session.Shutdown() + } + + runtimeScroll, err := s.store.GetScroll(id) if err != nil { return err } - if err := session.DeleteRuntime(purgeData); err != nil { - return err + if runtimeScroll.Root != "" { + if err := s.runtimeBackend.DeleteRuntime(runtimeScroll.Root, purgeData); err != nil { + return err + } } - session.Shutdown() return s.store.DeleteScroll(id) } diff --git a/apps/druid/core/services/runtime_materialization.go b/apps/druid/core/services/runtime_materialization.go index 25efe47d..1ccd59e4 100644 --- a/apps/druid/core/services/runtime_materialization.go +++ b/apps/druid/core/services/runtime_materialization.go @@ -17,12 +17,12 @@ import ( var ErrRuntimeMaterializationUnsupported = errors.New("runtime backend does not support daemon materialization") -func (s *RuntimeSupervisor) materializeNewScroll(ctx context.Context, runtimeService ports.RuntimeBackendInterface, artifact string, name string, registryCredentials []domain.RegistryCredential) (*ports.RuntimeMaterialization, error) { +func (s *RuntimeSupervisor) materializeNewScroll(ctx context.Context, runtimeService ports.RuntimeBackendInterface, artifact string, name string, namespace string, registryCredentials []domain.RegistryCredential) (*ports.RuntimeMaterialization, error) { id := coreservices.RuntimeScrollIDFromName(name) if id == "" { return nil, ErrRuntimeMaterializationUnsupported } - return s.runPullWorker(ctx, runtimeService, ports.RuntimeWorkerModeCreate, id, artifact, s.store.Root(id), registryCredentials) + return s.runPullWorker(ctx, runtimeService, ports.RuntimeWorkerModeCreate, id, artifact, runtimeService.RootRef(id, namespace), registryCredentials) } func (s *RuntimeSupervisor) runPullWorker(ctx context.Context, runtimeService ports.RuntimeBackendInterface, mode ports.RuntimeWorkerMode, runtimeID string, artifact string, 
root string, registryCredentials []domain.RegistryCredential) (*ports.RuntimeMaterialization, error) { diff --git a/apps/druid/core/services/runtime_session.go b/apps/druid/core/services/runtime_session.go index 344c4ff1..d378d533 100644 --- a/apps/druid/core/services/runtime_session.go +++ b/apps/druid/core/services/runtime_session.go @@ -13,7 +13,7 @@ import ( // the command queue and cached scroll.yaml; storage and containers stay behind // the runtime backend. type RuntimeSession struct { - store coreservices.RuntimeScrollStore + store ports.RuntimeScrollStore runtimeScroll *domain.RuntimeScroll scrollService *coreservices.ScrollService queueManager *coreservices.QueueManager @@ -32,7 +32,7 @@ type RuntimeSession struct { } func NewRuntimeSession( - store coreservices.RuntimeScrollStore, + store ports.RuntimeScrollStore, runtimeScroll *domain.RuntimeScroll, runtimeService ports.RuntimeBackendInterface, ) (*RuntimeSession, error) { diff --git a/apps/druid/core/services/runtime_supervisor.go b/apps/druid/core/services/runtime_supervisor.go index 41f7384c..c97a0cdf 100644 --- a/apps/druid/core/services/runtime_supervisor.go +++ b/apps/druid/core/services/runtime_supervisor.go @@ -16,7 +16,7 @@ import ( // truth and session lifetimes; Docker/Kubernetes resource details stay behind // the runtime backend. 
type RuntimeSupervisor struct { - store coreservices.RuntimeScrollStore + store ports.RuntimeScrollStore manager *coreservices.RuntimeScrollManager runtimeBackend ports.RuntimeBackendInterface workerCallbacks *WorkerCallbackManager @@ -31,7 +31,7 @@ type RuntimeSupervisor struct { } func NewRuntimeSupervisor( - store coreservices.RuntimeScrollStore, + store ports.RuntimeScrollStore, manager *coreservices.RuntimeScrollManager, runtimeBackend ports.RuntimeBackendInterface, ) *RuntimeSupervisor { @@ -78,23 +78,23 @@ func (s *RuntimeSupervisor) Start() error { } func (s *RuntimeSupervisor) Create(artifact string, name string, registryCredentials []domain.RegistryCredential) (*domain.RuntimeScroll, error) { - return s.CreateWithOwner(artifact, name, "", registryCredentials) + return s.CreateWithOwner(artifact, name, "", "", registryCredentials) } -func (s *RuntimeSupervisor) CreateWithOwner(artifact string, name string, ownerID string, registryCredentials []domain.RegistryCredential) (*domain.RuntimeScroll, error) { +func (s *RuntimeSupervisor) CreateWithOwner(artifact string, name string, ownerID string, namespace string, registryCredentials []domain.RegistryCredential) (*domain.RuntimeScroll, error) { id := coreservices.RuntimeScrollIDFromName(name) var placeholder *domain.RuntimeScroll if id != "" { if _, err := s.store.GetScroll(id); err == nil { - return nil, fmt.Errorf("%w: %s", coreservices.ErrScrollAlreadyExists, id) - } else if !errors.Is(err, coreservices.ErrScrollNotFound) { + return nil, fmt.Errorf("%w: %s", domain.ErrRuntimeScrollAlreadyExists, id) + } else if !errors.Is(err, domain.ErrRuntimeScrollNotFound) { return nil, err } placeholder = &domain.RuntimeScroll{ ID: id, OwnerID: ownerID, Artifact: artifact, - Root: s.store.Root(id), + Root: s.runtimeBackend.RootRef(id, namespace), Status: domain.RuntimeScrollStatusCreated, Commands: map[string]domain.LockStatus{}, } @@ -111,7 +111,7 @@ func (s *RuntimeSupervisor) CreateWithOwner(artifact string, name 
string, ownerI _ = s.store.UpdateScroll(placeholder) } - materialized, err := s.materializeNewScroll(context.Background(), s.runtimeBackend, artifact, name, registryCredentials) + materialized, err := s.materializeNewScroll(context.Background(), s.runtimeBackend, artifact, name, namespace, registryCredentials) if err != nil { markPlaceholderError(err) return nil, err @@ -120,24 +120,8 @@ func (s *RuntimeSupervisor) CreateWithOwner(artifact string, name string, ownerI artifact = materialized.Artifact } if placeholder != nil { - scroll, err := domain.NewScrollFromBytes(materialized.Root, materialized.ScrollYAML) + placeholder, err = s.applyMaterializedScroll(placeholder, artifact, materialized) if err != nil { - markPlaceholderError(err) - return nil, err - } - if err := scroll.Validate(false); err != nil { - markPlaceholderError(err) - return nil, err - } - placeholder.Artifact = artifact - placeholder.ArtifactDigest = materialized.ArtifactDigest - placeholder.Root = materialized.Root - placeholder.ScrollName = scroll.Name - placeholder.ScrollYAML = string(materialized.ScrollYAML) - placeholder.Status = domain.RuntimeScrollStatusCreated - placeholder.LastError = "" - placeholder.Commands = map[string]domain.LockStatus{} - if err := s.store.UpdateScroll(placeholder); err != nil { return nil, err } return placeholder, nil @@ -150,24 +134,50 @@ func (s *RuntimeSupervisor) CreateWithOwner(artifact string, name string, ownerI } func (s *RuntimeSupervisor) Ensure(artifact string, name string, registryCredentials []domain.RegistryCredential) (*domain.RuntimeScroll, error) { - return s.EnsureWithOwner(artifact, name, "", registryCredentials) + return s.EnsureWithOwner(artifact, name, "", "", registryCredentials) } -func (s *RuntimeSupervisor) EnsureWithOwner(artifact string, name string, ownerID string, registryCredentials []domain.RegistryCredential) (*domain.RuntimeScroll, error) { +func (s *RuntimeSupervisor) EnsureWithOwner(artifact string, name string, ownerID string, 
namespace string, registryCredentials []domain.RegistryCredential) (*domain.RuntimeScroll, error) { id := coreservices.RuntimeScrollIDFromName(name) if id != "" { runtimeScroll, err := s.store.GetScroll(id) if err == nil { - // A failed first materialization has no scroll.yaml yet. Returning it - // stops an Active CR from spawning one pull worker per reconcile. + if namespace != "" && runtimeScroll.Root != "" { + expectedRoot := s.runtimeBackend.RootRef(id, namespace) + if runtimeScroll.Root != expectedRoot { + return nil, fmt.Errorf("runtime %s already uses root %s; requested namespace %s would use %s", id, runtimeScroll.Root, namespace, expectedRoot) + } + } if runtimeScroll.ScrollYAML == "" { - if ownerID != "" && runtimeScroll.OwnerID != ownerID { - runtimeScroll.OwnerID = ownerID - if err := s.store.UpdateScroll(runtimeScroll); err != nil { - return nil, err + if runtimeScroll.Status == domain.RuntimeScrollStatusError { + if ownerID != "" && runtimeScroll.OwnerID != ownerID { + runtimeScroll.OwnerID = ownerID + if err := s.store.UpdateScroll(runtimeScroll); err != nil { + return nil, err + } } + return runtimeScroll, nil } - return runtimeScroll, nil + if artifact == "" { + artifact = runtimeScroll.Artifact + } + materialized, err := s.materializeNewScroll(context.Background(), s.runtimeBackend, artifact, name, namespace, registryCredentials) + if err != nil { + runtimeScroll.Status = domain.RuntimeScrollStatusError + runtimeScroll.LastError = err.Error() + if ownerID != "" { + runtimeScroll.OwnerID = ownerID + } + _ = s.store.UpdateScroll(runtimeScroll) + return nil, err + } + if materialized.Artifact != "" { + artifact = materialized.Artifact + } + if ownerID != "" { + runtimeScroll.OwnerID = ownerID + } + return s.applyMaterializedScroll(runtimeScroll, artifact, materialized) } if runtimeScroll.Status == domain.RuntimeScrollStatusError && (artifact == "" || artifact == runtimeScroll.Artifact) { if ownerID != "" && runtimeScroll.OwnerID != ownerID { @@ 
-204,17 +214,45 @@ func (s *RuntimeSupervisor) EnsureWithOwner(artifact string, name string, ownerI } return runtimeScroll, nil } - if !errors.Is(err, coreservices.ErrScrollNotFound) { + if !errors.Is(err, domain.ErrRuntimeScrollNotFound) { return nil, err } } - runtimeScroll, err := s.CreateWithOwner(artifact, name, ownerID, registryCredentials) + runtimeScroll, err := s.CreateWithOwner(artifact, name, ownerID, namespace, registryCredentials) if err != nil { return nil, err } return runtimeScroll, nil } +func (s *RuntimeSupervisor) applyMaterializedScroll(runtimeScroll *domain.RuntimeScroll, artifact string, materialized *ports.RuntimeMaterialization) (*domain.RuntimeScroll, error) { + scroll, err := domain.NewScrollFromBytes(materialized.Root, materialized.ScrollYAML) + if err != nil { + runtimeScroll.Status = domain.RuntimeScrollStatusError + runtimeScroll.LastError = err.Error() + _ = s.store.UpdateScroll(runtimeScroll) + return nil, err + } + if err := scroll.Validate(false); err != nil { + runtimeScroll.Status = domain.RuntimeScrollStatusError + runtimeScroll.LastError = err.Error() + _ = s.store.UpdateScroll(runtimeScroll) + return nil, err + } + runtimeScroll.Artifact = artifact + runtimeScroll.ArtifactDigest = materialized.ArtifactDigest + runtimeScroll.Root = materialized.Root + runtimeScroll.ScrollName = scroll.Name + runtimeScroll.ScrollYAML = string(materialized.ScrollYAML) + runtimeScroll.Status = domain.RuntimeScrollStatusCreated + runtimeScroll.LastError = "" + runtimeScroll.Commands = map[string]domain.LockStatus{} + if err := s.store.UpdateScroll(runtimeScroll); err != nil { + return nil, err + } + return runtimeScroll, nil +} + func (s *RuntimeSupervisor) List() ([]*domain.RuntimeScroll, error) { return s.store.ListScrolls() } diff --git a/apps/druid/core/services/runtime_supervisor_test.go b/apps/druid/core/services/runtime_supervisor_test.go index 992ecfd3..db7eabaa 100644 --- a/apps/druid/core/services/runtime_supervisor_test.go +++ 
b/apps/druid/core/services/runtime_supervisor_test.go @@ -11,6 +11,7 @@ import ( "github.com/highcard-dev/daemon/internal/core/domain" "github.com/highcard-dev/daemon/internal/core/ports" coreservices "github.com/highcard-dev/daemon/internal/core/services" + "github.com/highcard-dev/daemon/internal/runtime/docker" ) func TestRuntimeSessionUsesCachedScrollYAML(t *testing.T) { @@ -33,7 +34,7 @@ commands: `, } - session, err := NewRuntimeSession(coreservices.NewRuntimeStateStore(t.TempDir()), runtimeScroll, &fakeWorkerBackend{}) + session, err := NewRuntimeSession(newTestStateStore(t), runtimeScroll, &fakeWorkerBackend{}) if err != nil { t.Fatal(err) } @@ -168,7 +169,7 @@ func TestRuntimeSupervisorEnsureCanCreate(t *testing.T) { if err := os.WriteFile(filepath.Join(artifact, "scroll.yaml"), []byte(cachedScrollYAML("start")), 0644); err != nil { t.Fatal(err) } - store := coreservices.NewRuntimeStateStore(t.TempDir()) + store := newTestStateStore(t) callbacks := NewWorkerCallbackManager() supervisor := NewRuntimeSupervisor( store, @@ -195,7 +196,7 @@ func TestRuntimeSupervisorCreateCanCreate(t *testing.T) { if err := os.WriteFile(filepath.Join(artifact, "scroll.yaml"), []byte(cachedScrollYAML("start")), 0644); err != nil { t.Fatal(err) } - store := coreservices.NewRuntimeStateStore(t.TempDir()) + store := newTestStateStore(t) callbacks := NewWorkerCallbackManager() supervisor := NewRuntimeSupervisor( store, @@ -218,7 +219,7 @@ func TestRuntimeSupervisorCreateCanCreate(t *testing.T) { } func TestRuntimeSupervisorCreateUsesPullWorkerBeforeStateMutation(t *testing.T) { - store := coreservices.NewRuntimeStateStore(t.TempDir()) + store := newTestStateStore(t) callbacks := NewWorkerCallbackManager() backend := &fakeWorkerBackend{callbacks: callbacks, scrollYAML: cachedScrollYAML("start"), digest: "sha256:worker"} supervisor := NewRuntimeSupervisor( @@ -235,8 +236,8 @@ func TestRuntimeSupervisorCreateUsesPullWorkerBeforeStateMutation(t *testing.T) if backend.action.Mode != 
ports.RuntimeWorkerModeCreate || backend.action.RuntimeID != "worker-scroll" { t.Fatalf("worker action = %#v", backend.action) } - if backend.action.RootRef != store.Root("worker-scroll") || backend.action.MountPath != "/scroll" { - t.Fatalf("worker root = %#v, want %s mounted at /scroll", backend.action, store.Root("worker-scroll")) + if backend.action.RootRef != backend.RootRef("worker-scroll", "") || backend.action.MountPath != "/scroll" { + t.Fatalf("worker root = %#v, want %s mounted at /scroll", backend.action, backend.RootRef("worker-scroll", "")) } if backend.action.CallbackToken == "" || !strings.Contains(backend.action.CallbackURL, "/internal/v1/workers/worker-scroll/complete") { t.Fatalf("callback action = %#v", backend.action) @@ -244,13 +245,29 @@ func TestRuntimeSupervisorCreateUsesPullWorkerBeforeStateMutation(t *testing.T) if runtimeScroll.ArtifactDigest != "sha256:worker" { t.Fatalf("artifact digest = %s, want sha256:worker", runtimeScroll.ArtifactDigest) } - if runtimeScroll.Root != store.Root("worker-scroll") { - t.Fatalf("root = %s, want %s", runtimeScroll.Root, store.Root("worker-scroll")) + if runtimeScroll.Root != backend.RootRef("worker-scroll", "") { + t.Fatalf("root = %s, want %s", runtimeScroll.Root, backend.RootRef("worker-scroll", "")) + } +} + +func TestRuntimeSupervisorCreateUsesRequestedNamespaceForRoot(t *testing.T) { + store := newTestStateStore(t) + callbacks := NewWorkerCallbackManager() + backend := &fakeWorkerBackend{callbacks: callbacks, scrollYAML: cachedScrollYAML("start")} + supervisor := NewRuntimeSupervisor(store, coreservices.NewRuntimeScrollManager(store), backend) + supervisor.SetWorkerCallbacks(callbacks, "http://druid-cli:8083") + + runtimeScroll, err := supervisor.CreateWithOwner("registry.local/lab:1.0", "worker-scroll", "owner-a", "games", nil) + if err != nil { + t.Fatal(err) + } + if want := backend.RootRef("worker-scroll", "games"); backend.action.RootRef != want || runtimeScroll.Root != want { + t.Fatalf("root 
action=%s scroll=%s want %s", backend.action.RootRef, runtimeScroll.Root, want) } } func TestRuntimeSupervisorEnsureMaterializationFailureIsRemembered(t *testing.T) { - store := coreservices.NewRuntimeStateStore(t.TempDir()) + store := newTestStateStore(t) callbacks := NewWorkerCallbackManager() backend := &fakeWorkerBackend{callbacks: callbacks, workerErr: errors.New("pull image failed")} supervisor := NewRuntimeSupervisor( @@ -280,8 +297,40 @@ func TestRuntimeSupervisorEnsureMaterializationFailureIsRemembered(t *testing.T) } } +func TestRuntimeSupervisorEnsureRepairsIncompletePlaceholder(t *testing.T) { + store := newTestStateStore(t) + if err := store.CreateScroll(&domain.RuntimeScroll{ + ID: "repair-scroll", + Artifact: "registry.local/lab:1.0", + Root: store.Root("repair-scroll"), + Status: domain.RuntimeScrollStatusCreated, + Commands: map[string]domain.LockStatus{}, + }); err != nil { + t.Fatal(err) + } + callbacks := NewWorkerCallbackManager() + backend := &fakeWorkerBackend{callbacks: callbacks, scrollYAML: cachedScrollYAML("start"), digest: "sha256:repair"} + supervisor := NewRuntimeSupervisor( + store, + coreservices.NewRuntimeScrollManager(store), + backend, + ) + supervisor.SetWorkerCallbacks(callbacks, "http://druid-cli:8083") + + runtimeScroll, err := supervisor.Ensure("registry.local/lab:1.0", "repair-scroll", nil) + if err != nil { + t.Fatal(err) + } + if backend.spawnCount != 1 || backend.action.Mode != ports.RuntimeWorkerModeCreate { + t.Fatalf("worker action = %#v spawnCount=%d", backend.action, backend.spawnCount) + } + if runtimeScroll.ScrollYAML == "" || runtimeScroll.ArtifactDigest != "sha256:repair" || runtimeScroll.Status != domain.RuntimeScrollStatusCreated { + t.Fatalf("runtime scroll = %#v", runtimeScroll) + } +} + func TestRuntimeSupervisorEnsureDoesNotRetryExistingError(t *testing.T) { - store := coreservices.NewRuntimeStateStore(t.TempDir()) + store := newTestStateStore(t) existing := &domain.RuntimeScroll{ ID: "invalid-scroll", 
Artifact: "registry.local/invalid:1.0", @@ -314,7 +363,7 @@ func TestRuntimeSupervisorEnsureDoesNotRetryExistingError(t *testing.T) { } func TestRuntimeSupervisorEnsureUpdatesChangedArtifact(t *testing.T) { - store := coreservices.NewRuntimeStateStore(t.TempDir()) + store := newTestStateStore(t) root := "k8s://druid/druid-update-scroll-data" existing := &domain.RuntimeScroll{ ID: "update-scroll", @@ -369,7 +418,7 @@ func TestRuntimeSupervisorEnsureUpdatesChangedArtifact(t *testing.T) { } func TestRuntimeSupervisorUpdateUsesPullWorkerWhenAvailable(t *testing.T) { - store := coreservices.NewRuntimeStateStore(t.TempDir()) + store := newTestStateStore(t) root := "k8s://druid/druid-update-worker-data" existing := &domain.RuntimeScroll{ ID: "update-worker", @@ -464,10 +513,35 @@ func TestDeriveRuntimeScrollStatusTreatsDoneFiniteAsStopped(t *testing.T) { } } +func TestDeleteDoesNotParseScrollYAML(t *testing.T) { + store := newTestStateStore(t) + backend := &fakeWorkerBackend{} + supervisor := NewRuntimeSupervisor(store, coreservices.NewRuntimeScrollManager(store), backend) + if err := store.CreateScroll(&domain.RuntimeScroll{ + ID: "legacy", + Root: "runtime://legacy", + ScrollName: "legacy", + ScrollYAML: "name: legacy\ncommands:\n start:\n procedures:\n - mode: container\n", + Status: domain.RuntimeScrollStatusCreated, + }); err != nil { + t.Fatal(err) + } + + if err := supervisor.DeleteWithPolicy("legacy", false); err != nil { + t.Fatal(err) + } + if backend.deleteRoot != "runtime://legacy" { + t.Fatalf("delete root = %q, want runtime://legacy", backend.deleteRoot) + } + if _, err := store.GetScroll("legacy"); !errors.Is(err, domain.ErrRuntimeScrollNotFound) { + t.Fatalf("GetScroll after delete error = %v, want not found", err) + } +} + func newRuntimeSessionForTest(t *testing.T, commands map[string]domain.LockStatus, scrollYAML string) *RuntimeSession { t.Helper() root := t.TempDir() - store := coreservices.NewRuntimeStateStore(t.TempDir()) + store := 
newTestStateStore(t) runtimeScroll := &domain.RuntimeScroll{ ID: "cached", Artifact: "local", @@ -493,6 +567,7 @@ type fakeWorkerBackend struct { workerErr error action ports.RuntimeWorkerAction stopRoot string + deleteRoot string spawnCount int } @@ -500,6 +575,13 @@ func (f *fakeWorkerBackend) Name() string { return "fake-worker" } +func (f *fakeWorkerBackend) RootRef(id string, namespace string) string { + if namespace != "" { + return "runtime://" + namespace + "/" + id + } + return "runtime://" + id +} + func (f *fakeWorkerBackend) ReadScrollFile(root string) ([]byte, error) { return []byte(f.scrollYAML), nil } @@ -536,6 +618,7 @@ func (f *fakeWorkerBackend) StopRuntime(root string) error { } func (f *fakeWorkerBackend) DeleteRuntime(root string, purgeData bool) error { + f.deleteRoot = root return nil } @@ -603,6 +686,15 @@ func assertQueued(t *testing.T, session *RuntimeSession, command string) { } } +func newTestStateStore(t *testing.T) ports.RuntimeScrollStore { + t.Helper() + store, err := docker.NewStateStore(t.TempDir()) + if err != nil { + t.Fatal(err) + } + return store +} + type fakeProcedureStatuses struct { statuses map[string]domain.ScrollLockStatus } diff --git a/examples/container-lab/scroll.yaml b/examples/container-lab/scroll.yaml index 7f94bc91..dbf6a8d9 100644 --- a/examples/container-lab/scroll.yaml +++ b/examples/container-lab/scroll.yaml @@ -7,11 +7,9 @@ ports: - name: http protocol: http port: 8080 - mandatory: true - name: redis protocol: tcp port: 6379 - mandatory: true serve: "start" commands: diff --git a/examples/minecraft/scroll.yaml b/examples/minecraft/scroll.yaml index 960c6139..73986236 100644 --- a/examples/minecraft/scroll.yaml +++ b/examples/minecraft/scroll.yaml @@ -7,8 +7,6 @@ ports: - name: minecraft protocol: tcp port: 25565 - sleep_handler: generic - mandatory: true serve: "start" commands: @@ -41,7 +39,7 @@ commands: sub_path: . 
env: DRUID_ROOT: "/runtime" - DRUID_COLDSTARTER_STATUS_FILE: ".coldstarter-finished.json" + DRUID_PORT_MINECRAFT_COLDSTARTER: generic command: - druid-coldstarter diff --git a/examples/mysql/scroll.yaml b/examples/mysql/scroll.yaml index 67166f00..0d7fda1f 100644 --- a/examples/mysql/scroll.yaml +++ b/examples/mysql/scroll.yaml @@ -7,7 +7,6 @@ ports: - name: mysql protocol: tcp port: 3306 - mandatory: true serve: "start" commands: diff --git a/examples/static-web/scroll.yaml b/examples/static-web/scroll.yaml index 165593f9..5ec4e875 100644 --- a/examples/static-web/scroll.yaml +++ b/examples/static-web/scroll.yaml @@ -7,7 +7,6 @@ ports: - name: http protocol: http port: 80 - mandatory: true serve: "start" commands: diff --git a/internal/api/generated.go b/internal/api/generated.go index 8f0f7c77..cdece059 100644 --- a/internal/api/generated.go +++ b/internal/api/generated.go @@ -50,6 +50,9 @@ type CreateScrollRequest struct { // Name Optional local runtime scroll id/name. If omitted, the daemon derives it from scroll.yaml name. Name *string `json:"name,omitempty"` + // Namespace Kubernetes namespace for runtime resources. Ignored by non-Kubernetes backends. + Namespace *string `json:"namespace,omitempty"` + // OwnerId Runtime owner id used for customer-facing route authorization. OwnerId *string `json:"owner_id,omitempty"` RegistryCredentials *[]RegistryCredential `json:"registry_credentials,omitempty"` @@ -68,6 +71,9 @@ type EnsureScrollRequest struct { Id *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + // Namespace Kubernetes namespace for runtime resources. Ignored by non-Kubernetes backends. + Namespace *string `json:"namespace,omitempty"` + // OwnerId Runtime owner id used for customer-facing route authorization. 
OwnerId *string `json:"owner_id,omitempty"` RegistryCredentials *[]RegistryCredential `json:"registry_credentials,omitempty"` @@ -2944,46 +2950,46 @@ func RegisterHandlersWithOptions(router fiber.Router, si ServerInterface, option // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+xa3XMbuQ3/VzhsH9eW00v74Def07vmmpu4djp5uMt4KBKSGHFJmuTaVj363zv82NV+", - "cPUVp40z95KJRQAEfgBIAMsnTFWplQTpLD5/wpYuoCThvxdai9W1qhyX82u4q8A6/7M2SoNxHAIRsZbP", - "ZVmzcwdl+M+fDczwOf7TZCN+kmRPrivpeAleNFw0/HhdYLfSgM8xMYas8HpdYAN3FTfA8Plvna0+NbRq", - "+hloYL5UZUkku3HEVfZXooN6jHHHlSTiqqW2MxXkBBggDm6oUUKMG2wcnxEaVhhYarj2G+Bz/P7yLapX", - "kYEZGJAUkDJIKEoEskEw0sQtcIHhkZRaRGsjjz1lpuLsdD6fOLAu/HPu/8GNrtYZLudeV86GCrwBbYAS", - "BwwRwYlFM2WQJCWcovc6opBUMdEDtUqcTQJZR63PampzO3vKjPF7bYDezpAquXPACuQWgBiBUknEwPB7", - "sIg7NDOqTGynK1IKtL9m6kGCuc0hkyIOBQrEGaossAAPraxTJZiTGaFczpHxQYlI5RbK8P8Qz3+a28vA", - "nFtnVrfUAAPpOBEHJEBivmx4dwd/HXa5yH8DAhywGLnDkI2IDEywIVH80gZaFiUNLe6pwz1JEpDT6O/S", - "VuaQVBpoVy/eMj5P3CNJMBqhfwRI7Y5/ABFucQ1WK2lh6IlSsUxOX1bGgHRoEbhRdDcKtO10VMuc/dqo", - "uQFrh2Kv0grSYChIR+YQkBaKMI+wVyzg6pN8pkxJHD7HM6GIPwlL8sjLqsTnr87OClxyGf86a1SQVTkF", - "kwLcuFtGXMa2jwuQ7RMo0IbAb3b0jCc+KnCBZSUEmXpzO3fHSHYEiHJ+yPh14IuFGol2Tax9UCYf85UF", - "MxL3PeWC/BZDS3BW5ZgZFym83tfOOS6rx7KCwYxUwuHzGREWiufLEr9l8G1LnalSAog8LIUSDlfKuJvm", - "4OxaPlWVZLl9igD6LddZTMKaVh0VuXQwj1G8BNAXgt/DB0NmM06zMgSx7pZQx++5W92SICobyPsfluMa", - "aaMosMqM8BnlFFUi7//H2+nKRbga/bh0f3u90a21k1WVoflt3ACNFtxp8aC9ah613C7zgUumHvI6HWJd", - "L/jqXGywTR5oAVqkCNsY3yC0JWL7ZfYgbOHR+aNAbIvPw+5Zr/jt+Oq2ANHVVHC6JR0qI/Jn3Db7uZx/", - "IGYOGetH1fQLVhN6cHbsMv7Y3LEggDplxvuaXEj2UbFg7jmFcQ1rgjEL94vb2/r3QQx3FOhttyWMx4rb", - "rdcNjS2hPbgVpKEVZAedpCPVaDiawZjouK016fC4VGrkHo1B/ewtd4Fj67UlPDZdg/SF1281VrjAppLS", - "k3kqpXX4LVpeNJ3FpwxylWYHop1rR5pISLh1bWk07zi3s3cu+mLYvVPzrQOFlhfGMrABuLdFyDhaGe5W", - "N95ZqZQAYsBcVG6x+eunGpdfPn4IgLbr2V8+fkBOLUHGBpqHgsitkDbqnjMwAQ0v3l9qQdwG1IVzOmjm", - 
"+es9u+JvFsq4E1+FMHRXgVnVmymDPsL0RtElOESVlEDr0p17xkCM67sibrHZmWj+T/C9jE8fOVN+Y6qk", - "i/m87hv5xlScoct3b5EglaQLsIhIhkoiyRwsCpxcgjmZEroEVk8fiNaC09hSFEjwJfwu56QE5M8eMLZA", - "jDgyJRZsEQQ+wLReO/09qMtdaHUaBXCB/WpU6+z01elZyGUNkmiOz/EP4SdfWrtFcOiEaD65fzWJvZT/", - "Jd1GXQt/Bld3JJ2uCwfhsfJ+yyJhbOqCv0KVG3q7sNlfzs5qJNON34Jg8tn6repR366To9c6Bld1dY4U", - "Ibb/evbD/3Djm3h1oEqSe8JjfxbyqSpLYlYJzj6Ojsxt6IOiJwoc8cafPGvtphg5tuWnLvzvuHU3ieYL", - "wT/k/E6X4HAWMMDmujOAsz1cvPq9GV0bmrTSxsbf5DYDRHtmiuOhDNb9qNjq2QIhN5Zdd28Af4uvB354", - "9Wwq9ODfBTeqr8Qu6tGQHu7bYR+G5ATCaC0UQlmPtEdvX8kjueneXh45+795JKLW90g0pD+thkduXbxa", - "VBpri1WcENmD3fXE2Tqe877+Gborzm4bd2liSAkOjN/iKd6h6YtBukJDrdMFumiB1i+UPn1FJ3Tnzrud", - "UNeA6wK/Pns9PoVN5FI5NAsdb9drcduD8qjIH+M/g3uZyB8Y/l+KuL9Hv/DY8nkw8XVZpcfPrh/D+ld2", - "yfOfh7tmpN/a2RhhRp41JWT3VHwEWrUSLHntKI/XHfjkKf1vPe7960pGndOX3K8RAUVWCG02fCHp/e/Y", - "tfaT8gvT/LqS/btwg8xRzpczPh8topvT9zLSfYNncL9ZHzjiihi76TSTwcPDU+fIjsXUKgF2L1Qj5TeI", - "6wHDuCHmtWFoCStgaBqmHGn6OIQ+ondiqdLAEN2AcgT4Qs33AP6dp3phBUVnzpXB3Nt0DN4iYlFjnf6s", - "IR9HWivj9oD6SsWa/JvD+pCuvvVN8eDOHnmg6uHGs1d5HelHZUwTKfs4c0P7wrJn8PQsd1PU1gU8c5mj", - "exRHAX5XQQW7sf5XIPsOYQ6GjUJch/ddi2oD810CZffxZMA6tW0Kcx0J/mhlvnafG3Heu5epHXdUbrW+", - "uuW9Hp7MprlYon05rs+99/3W3D3W93R8fgXGcuvSQzJlTuLLYWAovi5ApvHNMAjid+hdITBx4TnBHpda", - "5/nBiy9Vuo8p9qlWIgOq8crUi45MRRwygGS1bxqGI3zUvPfKJ+mNX/5Ox3438Rnj9vwIRM8yz7NO6W1A", - "K/3d4hzeN+zCWel+Qf2gzFIowix6WHABSBsIL2DkPHyE3uGG9DynBnHs6/jF1VucXkrhCfZIJaGDz/pR", - "qfgBvQTpwkePNPNBEO5OT9n4qJkzPQ37Q2SdAVJ6Uzy3AWc43BOx4Q7d35A3lXWpKNsos2GMhdmQM/v0", - "IOweHhrYjYQHmNpAmZHi+y/EZXx3wpUMApqmJwkIZ86QN36lRnQBdGmzjOk785D110o4fpLiog6TnPV1", - "JAxFvIlPBQSfAV1RkWdP4TPk/skH4ANxdFH7jME9CKVDJKR33jV+niwj40JK5SJqMy+OUAq2ZT1p1i1e", - "f1r/NwAA//9iQ40TbzQAAA==", + "H4sIAAAAAAAC/+xaS3Mcue3/Kqz+/48jjZx1ctBNK2c32vWWFckpH3ZdKg6J6aHFJimSLWmimu+e4qun", + "H+x5WU4k115c1hAAgR8AEkDzqSCyUlKAsKY4fSoMWUCF/X/PlOLLK1lbJsoruKvBWPez0lKBtgw8ETaG", + 
"laJK7MxC5f/z/xrmxWnxf9O1+GmUPb2qhWUVONFw1vAXq0lhlwqK0wJrjZfFajUpNNzVTAMtTn/vbPW5", + "oZWzL0A887msKizotcW2Nr9h5dWjlFkmBeaXLbWtriEnQAO2cE205HzcYG3ZHBO/QsEQzZTboDgtPpxf", + "oLSKNMxBgyCApEZcEsyR8YKRwnZRTAp4xJXiwdrAY46prhk9LsupBWP9P6fun6LR1VjNROl0ZXSowDtQ", + "Ggi2QBHmDBs0lxoJXMEx+qACClEVHTyQVGJ06sk6an2RM5Pb2VFmjN9pA3QxR7Ji1gKdILsARDFUUiAK", + "mt2DQcyiuZZVZDte4oqj/TQzCpOMer/WM9ACLBjUUHl4kqIajKw1AXOMLkohNVA0WyIhxVGLdYbJLQhq", + "jnO7ywcB+ibnlxjvyFMgRlFtgPrdSW2srEAfzTFhokTapQTCtV1Izf6NHX92Lw0lM1Yvb4gGCsIyzPdI", + "v8h83vBuT70U9Lm8ewccLNCQN8OECYgMTDA+Td3S2rE0SBpa3FOHOZIoIKfR34Wp9T6JPNAuLd5QVkbu", + "kRQczY8/w/NlhOc/AHO7uAKjpDAwjINK0oxHzmutQVi08NwoBBvytO2jSN7m7FdalhqMGYq9jCtIgSYg", + "LC6Dn7nE1CHsFPO4ugNuLnWFbXFazLnE7hao8COr6qo4fXNyMikqJsJfJ40Koq5moGN6aXtDsc3Y9mkB", + "on36elqfds2OjvHIRUUxKUTNOZ45czv35khueohyfsj4deCLhRzJNYWNeZA6n3G1AT2SdT3lvPwWQ0tw", + "VuWQGWcxvD4k5xx2poxlBYU5rrktTueYG5g8X5a4Lb1vW+rMpOSAxX4pFHG4lNpeN8d21/KZrAXN7TPx", + "oN8wlcXErynZUZEJC2WI4lsAdcbZPXzUeD5nJCuDY2NvMLHsntnlDfaisoG8+1E9rpHSkgCt9QifllYS", + "yfP+f7yZLW2Aq9GPCfu3t2vdWjuFAz8ryQ7QaMEdF/faK/HI280yH5ig8iGv0z7W9YIv5WKDbfRAC9BJ", + "jLC18Q1CGyK232IMwhYerTsK+Kb43O+Wd4rfjK9uChBVzzgjG9Kh1jx/xm2yn4nyI9YlZKzfrVTZJzu2", + "GX9o7hjgQKzU4z1dLiT7qBjQ94zAuIaJYMzC3eL2Jv0+iOGOAr3tNoTxWGm98bohoR02e7fBxLfBdK+T", + "dKQW9kczaB0ct7EmHR6XUo7coyGon33cMClC27khPNY9i3CF1+8Jq2JS6FoIR+aopFL+t2D5pOlrPmeQ", + "qxXdE+1cM9REQsSta0ujece5nb1z0RfC7r0sNw5TWl4Yy8AG4N4WPuNIrZldXjtnxVICsAZ9VtvF+q+f", + "Ei6/fProAW3Xs798+oisvAURhgfMF0R2iZSW94yC9mg48e5S8+LWoC6sVV4zx5/27Iq/Xkhtj1wVQtFd", + "DXqZNpMafYLZtSS3YBGRQgBJpTtzjJ64SHdF2GK9M1bsV3C9jEsfMZduYyKFDfm86hv5TteMovP3F4jj", + "WpAFGIQFRRUWuASDPCcToI98F0jT5AUrxRkJLcUEcXYLf4gSV4Dc2QPaTBDFFs+wATPxAh9gltaO//Dq", + "MutbnUaBYlK41aDWyfGb4xOfywoEVqw4LX7wP7nS2i68Q6dYsen9m2nopdwv8TbqWvgz2NSRdLquwgsP", + "lfcFDYShqfP+8lWu7+38Zn85OUlIxhu/BcH0i3FbpTHntpOj1zp6V3V1DhQ+tv968sN/cePrcHWgWuB7", + "zEJ/5vOpriqslxHOPo4Wl8b3QcETkyLgXXx2rMlNIXJMy09d+N8zY68jzVeCv8/5HS/B4SxggM1VZ/ho", + 
"erg49XvzyTY0caWNjbvJTQaI9ry4CIcyGPujpMtnC4TcSHrVvQHcLb4a+OHNs6nQg38b3ChdiV3UgyE9", + "3DfDPgzJKfjBni+Esh5pD/6+kUdys8WdPHLyP/NIQK3vkWBIf1IPj8zYcLXIONLnyzAhMnu764nRVTjn", + "Xf0zdFeYHDfuUljjCixot8VTuEPj15J4hfpapwv0pAVav1D6/A2d0J16b3dCqgFXk+LtydvxKWwkF9Ki", + "ue94u14L2+6VR5P8Mf4z2NeJ/J7h/7WIu3v0K48tlwdTV5fVavzs+tGvf2OXPP95uG1G+tLOxgAzcqwx", + "Ibun4iOQupVg0WsHeTx14NOn+L/VuPevahF0jl+xv0UETLJCSLPhK0nvf4WutZ+UX5nmV7Xo34VrZA5y", + "vpizcrSIbk7f80D3As/gfrM+cMQl1mbdaUaDh4enypEdiqmRHMxOqAbKF4jrHsO4IebJMHQLy/AReD19", + "HEIf0DsyRCqgiKxBOQB8LssdgH/vqF5ZQdGZc2UwdzYdgjcPWCSs458J8nGkldR2B6gvZajJXxzW+3T1", + "rW+Ke3f2yAGVhhvPXuV1pB+UMU2k7OLMNe0ry57Bs7vcTZGs83jmMkf1KA4C/K6GGrZj/U9P9h3C7A0b", + "hTiF912Lag3zXQRl+/GkwVi5aQpzFQj+bGW+dZ8bcN65l0mOOyi3Wl/d8l73z4XjXCzSvh7X5946vzR3", + "j/U9HZ9fgjbM2PiQTOqj8GoaKAqvC5BufDMMgvAdelsITK1/TrDDpdZ5fvDqS5XuY4pdqpXAgBJemXrR", + "4hmH9HAz+aZhOMBHzXuvfJJeu+XvdOx3HZ4xbs4PT/Qs8zxjpdoEtFTfLc7+fcM2nKXqF9QPUt9yialB", + "DwvGASkN/gWMKP1H6C1uiM9zEohjX8fPLi+K+FKqmBYOqSh08Fk/KBU+oFcgrP/oEWc+CPzd6SgbHzVz", + "pqdhf4iM1YArZ4rj1mA1g3vM19y++xvyxrIuFmVrZdaMoTAbcmafHvjd/UMDs5bwADPjKTNSXP+FmAjv", + "TpgUXkDT9EQB/swZ8oav1IgsgNyaLGP8zjxk/a3mlh3FuEhhkrM+RcJQxLvwVICzOZAl4Xn2GD5D7p9c", + "AD5gSxbJZxTugUvlIyG+8074ObKMjDMhpA2ozZ04TAiYlvW4WTfF6vPqPwEAAP//M7dIxGs1AAA=", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/internal/core/domain/runtime_scroll.go b/internal/core/domain/runtime_scroll.go index 078febc8..ea5521a5 100644 --- a/internal/core/domain/runtime_scroll.go +++ b/internal/core/domain/runtime_scroll.go @@ -1,6 +1,14 @@ package domain -import "time" +import ( + "errors" + "time" +) + +var ( + ErrRuntimeScrollNotFound = errors.New("runtime scroll not found") + ErrRuntimeScrollAlreadyExists = errors.New("runtime scroll already exists") +) type RuntimeScrollStatus string diff --git a/internal/core/domain/scroll.go b/internal/core/domain/scroll.go index 7c08c7af..85f36b32 100644 --- a/internal/core/domain/scroll.go +++ 
b/internal/core/domain/scroll.go @@ -32,28 +32,20 @@ type Chunks struct { Chunks []*Chunks `yaml:"chunks,omitempty" json:"chunks,omitempty"` } -type ColdStarterVars struct { - Name string `yaml:"name"` - Value string `yaml:"value"` -} - type Port struct { - Port int `yaml:"port" json:"port"` - Protocol string `yaml:"protocol" json:"protocol"` - Name string `yaml:"name" json:"name"` - SleepHandler *string `yaml:"sleep_handler" json:"sleep_handler"` - Mandatory bool `yaml:"mandatory" json:"mandatory"` - Vars []ColdStarterVars `yaml:"vars" json:"vars"` - StartDelay uint `yaml:"start_delay" json:"start_delay"` - FinishAfterCommand string `yaml:"finish_after_command" json:"finish_after_command"` - Description string `yaml:"description,omitempty" json:"description,omitempty"` + Port int `yaml:"port" json:"port"` + Protocol string `yaml:"protocol" json:"protocol"` + Name string `yaml:"name" json:"name"` + Description string `yaml:"description,omitempty" json:"description,omitempty"` } type AugmentedPort struct { Port - InactiveSince time.Time `json:"inactive_since"` - InactiveSinceSec uint `json:"inactive_since_sec"` - Open bool `json:"open"` + ColdstarterHandler string `json:"-"` + ColdstarterVars map[string]string `json:"-"` + InactiveSince time.Time `json:"inactive_since"` + InactiveSinceSec uint `json:"inactive_since_sec"` + Open bool `json:"open"` } type File struct { diff --git a/internal/core/ports/services_ports.go b/internal/core/ports/services_ports.go index 5effaa13..a0e0a169 100644 --- a/internal/core/ports/services_ports.go +++ b/internal/core/ports/services_ports.go @@ -42,6 +42,7 @@ type LogManagerInterface interface { type RuntimeBackendInterface interface { Name() string + RootRef(id string, namespace string) string ReadScrollFile(root string) ([]byte, error) StartDev(ctx context.Context, action RuntimeDevAction) error StopDev(ctx context.Context, root string) error @@ -57,6 +58,16 @@ type RuntimeBackendInterface interface { Signal(commandName string, 
target string, signal string, root string) error } +type RuntimeScrollStore interface { + StateDir() string + Root(id string) string + CreateScroll(scroll *domain.RuntimeScroll) error + ListScrolls() ([]*domain.RuntimeScroll, error) + GetScroll(id string) (*domain.RuntimeScroll, error) + UpdateScroll(scroll *domain.RuntimeScroll) error + DeleteScroll(id string) error +} + type RuntimeCommand struct { Name string ScrollID string diff --git a/internal/core/services/coldstarter.go b/internal/core/services/coldstarter.go index 89e3151f..89fd8a07 100644 --- a/internal/core/services/coldstarter.go +++ b/internal/core/services/coldstarter.go @@ -67,27 +67,21 @@ func (c *ColdStarter) Serve(ctx context.Context) { for _, port := range augmentedPorts { port := port - var sleepHandler string - if port.SleepHandler == nil { - logger.Log().Warn("Skipping coldstarter port without sleep handler", zap.Int("port", port.Port.Port), zap.String("port_name", port.Name)) + if port.ColdstarterHandler == "" { + logger.Log().Warn("Skipping coldstarter port without handler", zap.Int("port", port.Port.Port), zap.String("port_name", port.Name)) continue } - sleepHandler = *port.SleepHandler var handler ports.ColdStarterHandlerInterface - if sleepHandler == "generic" { + if port.ColdstarterHandler == "generic" { handler = lua.NewGenericReturnHandler() } else { - path := filepath.Join(c.dir, filepath.Clean(sleepHandler)) + path := filepath.Join(c.dir, filepath.Clean(port.ColdstarterHandler)) if rel, err := filepath.Rel(c.dir, path); err != nil || rel == ".." 
|| filepath.IsAbs(rel) || strings.HasPrefix(rel, "../") { - logger.Log().Error("Invalid coldstarter handler path", zap.String("sleep_handler", sleepHandler)) + logger.Log().Error("Invalid coldstarter handler path", zap.String("handler", port.ColdstarterHandler)) continue } - vars := make(map[string]string, len(port.Vars)) - for _, v := range port.Vars { - vars[v.Name] = v.Value - } - handler = lua.NewLuaHandler(c.queueManager, path, c.dir, vars, augmentedPortMap, c.progress) + handler = lua.NewLuaHandler(c.queueManager, path, c.dir, port.ColdstarterVars, augmentedPortMap, c.progress) } c.chandlers = append(c.chandlers, handler) @@ -99,10 +93,10 @@ func (c *ColdStarter) Serve(ctx context.Context) { var server ports.ColdStarterServerInterface switch port.Protocol { case "udp": - logger.Log().Info("Starting UDP coldstarter", zap.Int("port", port.Port.Port), zap.String("sleep_handler", sleepHandler), zap.String("port_name", port.Name)) + logger.Log().Info("Starting UDP coldstarter", zap.Int("port", port.Port.Port), zap.String("handler", port.ColdstarterHandler), zap.String("port_name", port.Name)) server = servers.NewUDP(handler) case "tcp", "http", "https", "": - logger.Log().Info("Starting TCP coldstarter", zap.Int("port", port.Port.Port), zap.String("sleep_handler", sleepHandler), zap.String("port_name", port.Name)) + logger.Log().Info("Starting TCP coldstarter", zap.Int("port", port.Port.Port), zap.String("handler", port.ColdstarterHandler), zap.String("port_name", port.Name)) server = servers.NewTCP(handler) default: logger.Log().Warn("Unsupported coldstarter protocol", zap.String("protocol", port.Protocol), zap.String("port_name", port.Name)) diff --git a/internal/core/services/coldstarter/handler/lua_handler.go b/internal/core/services/coldstarter/handler/lua_handler.go index 03b8695c..cdbda1a7 100644 --- a/internal/core/services/coldstarter/handler/lua_handler.go +++ b/internal/core/services/coldstarter/handler/lua_handler.go @@ -2,8 +2,10 @@ package lua import 
( "fmt" + "strings" "sync" "time" + "unicode" "github.com/highcard-dev/daemon/internal/core/domain" "github.com/highcard-dev/daemon/internal/core/ports" @@ -124,8 +126,10 @@ func (handler *LuaHandler) GetHandler(funcs map[string]func(data ...string)) (po func(l *lua.LState) int { arg := l.CheckString(1) - //get external var value, ok := handler.externalVars[arg] + if !ok { + value, ok = handler.externalVars[coldstarterVarKey(arg)] + } if !ok { l.Push(lua.LNil) } else { @@ -217,6 +221,21 @@ func (handler *LuaHandler) GetHandler(funcs map[string]func(data ...string)) (po return handler.stateWrapper, nil } +func coldstarterVarKey(value string) string { + var out strings.Builder + for i, r := range value { + if i > 0 && unicode.IsUpper(r) { + out.WriteByte('_') + } + if r == '-' || r == ' ' { + out.WriteByte('_') + continue + } + out.WriteRune(unicode.ToUpper(r)) + } + return out.String() +} + func (handler *LuaWrapper) Handle(data []byte, funcs map[string]func(data ...string)) error { if handler.luaState.IsClosed() { return fmt.Errorf("lua state is closed") diff --git a/internal/core/services/runtime_scroll_manager.go b/internal/core/services/runtime_scroll_manager.go index 4f2342e4..d279ecf2 100644 --- a/internal/core/services/runtime_scroll_manager.go +++ b/internal/core/services/runtime_scroll_manager.go @@ -15,12 +15,10 @@ import ( ) type RuntimeScrollManager struct { - store RuntimeScrollStore + store ports.RuntimeScrollStore } -var ErrScrollAlreadyExists = errors.New("runtime scroll already exists") - -func NewRuntimeScrollManager(store RuntimeScrollStore) *RuntimeScrollManager { +func NewRuntimeScrollManager(store ports.RuntimeScrollStore) *RuntimeScrollManager { return &RuntimeScrollManager{store: store} } @@ -50,8 +48,8 @@ func (m *RuntimeScrollManager) CreateWithDigest(artifact string, artifactDigest return nil, err } if _, err := m.store.GetScroll(id); err == nil { - return nil, fmt.Errorf("%w: %s", ErrScrollAlreadyExists, id) - } else if !errors.Is(err, 
ErrScrollNotFound) { + return nil, fmt.Errorf("%w: %s", domain.ErrRuntimeScrollAlreadyExists, id) + } else if !errors.Is(err, domain.ErrRuntimeScrollNotFound) { return nil, err } diff --git a/internal/core/services/runtime_scroll_manager_test.go b/internal/core/services/runtime_scroll_manager_test.go index 933ee7da..6767f027 100644 --- a/internal/core/services/runtime_scroll_manager_test.go +++ b/internal/core/services/runtime_scroll_manager_test.go @@ -5,6 +5,8 @@ import ( "os" "path/filepath" "testing" + + "github.com/highcard-dev/daemon/internal/core/domain" ) const testScrollYAML = `name: ghcr.io/druid-examples/static-web:1.0 @@ -43,22 +45,15 @@ func TestRuntimeScrollID(t *testing.T) { } func TestRuntimeScrollManagerCreateFailsDuplicateID(t *testing.T) { - store := NewRuntimeStateStore(t.TempDir()) + store := newMemoryRuntimeStore(t.TempDir()) manager := NewRuntimeScrollManager(store) if _, err := manager.Create("artifact", "", t.TempDir(), []byte(testScrollYAML)); err != nil { t.Fatal(err) } _, err := manager.Create("artifact", "", t.TempDir(), []byte(testScrollYAML)) - if !errors.Is(err, ErrScrollAlreadyExists) { - t.Fatalf("error = %v, want ErrScrollAlreadyExists", err) - } -} - -func TestRuntimeStateStoreUsesSingleRuntimeRoot(t *testing.T) { - store := NewRuntimeStateStore(t.TempDir()) - if got, want := store.Root("scroll-a"), filepath.Join(store.StateDir(), "scrolls", "scroll-a"); got != want { - t.Fatalf("Root = %s, want %s", got, want) + if !errors.Is(err, domain.ErrRuntimeScrollAlreadyExists) { + t.Fatalf("error = %v, want domain.ErrRuntimeScrollAlreadyExists", err) } } @@ -90,3 +85,55 @@ func TestMaterializeScrollArtifactKeepsScrollYamlNextToData(t *testing.T) { t.Fatalf("state = %q, want ok", got) } } + +type memoryRuntimeStore struct { + stateDir string + scrolls map[string]*domain.RuntimeScroll +} + +func newMemoryRuntimeStore(stateDir string) *memoryRuntimeStore { + return &memoryRuntimeStore{stateDir: stateDir, scrolls: 
map[string]*domain.RuntimeScroll{}} +} + +func (s *memoryRuntimeStore) StateDir() string { return s.stateDir } + +func (s *memoryRuntimeStore) Root(id string) string { + return filepath.Join(s.stateDir, "scrolls", id) +} + +func (s *memoryRuntimeStore) CreateScroll(scroll *domain.RuntimeScroll) error { + s.scrolls[scroll.ID] = scroll + return nil +} + +func (s *memoryRuntimeStore) ListScrolls() ([]*domain.RuntimeScroll, error) { + scrolls := make([]*domain.RuntimeScroll, 0, len(s.scrolls)) + for _, scroll := range s.scrolls { + scrolls = append(scrolls, scroll) + } + return scrolls, nil +} + +func (s *memoryRuntimeStore) GetScroll(id string) (*domain.RuntimeScroll, error) { + scroll, ok := s.scrolls[id] + if !ok { + return nil, domain.ErrRuntimeScrollNotFound + } + return scroll, nil +} + +func (s *memoryRuntimeStore) UpdateScroll(scroll *domain.RuntimeScroll) error { + if _, ok := s.scrolls[scroll.ID]; !ok { + return domain.ErrRuntimeScrollNotFound + } + s.scrolls[scroll.ID] = scroll + return nil +} + +func (s *memoryRuntimeStore) DeleteScroll(id string) error { + if _, ok := s.scrolls[id]; !ok { + return domain.ErrRuntimeScrollNotFound + } + delete(s.scrolls, id) + return nil +} diff --git a/internal/runtime/backend.go b/internal/runtime/backend.go index e68bd05e..e5b4ea9c 100644 --- a/internal/runtime/backend.go +++ b/internal/runtime/backend.go @@ -4,15 +4,13 @@ import ( "fmt" "github.com/highcard-dev/daemon/internal/core/ports" - coreservices "github.com/highcard-dev/daemon/internal/core/services" "github.com/highcard-dev/daemon/internal/runtime/docker" runtimekubernetes "github.com/highcard-dev/daemon/internal/runtime/kubernetes" - "github.com/highcard-dev/daemon/internal/utils" ) type Runtime struct { Backend ports.RuntimeBackendInterface - Store coreservices.RuntimeScrollStore + Store ports.RuntimeScrollStore } type Options struct { @@ -42,7 +40,7 @@ var newKubernetesBackend = func(config runtimekubernetes.Config, consoleManager return 
runtimekubernetes.New(config, consoleManager) } -var newKubernetesStateStore = func(config runtimekubernetes.Config) (coreservices.RuntimeScrollStore, error) { +var newKubernetesStateStore = func(config runtimekubernetes.Config) (ports.RuntimeScrollStore, error) { return runtimekubernetes.NewConfigMapStateStore(config) } @@ -57,7 +55,7 @@ func NewRuntime(name string, consoleManager ports.ConsoleManagerInterface, state if err != nil { return nil, err } - store, err := newSQLiteStore(stateDir) + store, err := docker.NewStateStore(stateDir) if err != nil { return nil, err } @@ -80,19 +78,8 @@ func NewRuntime(name string, consoleManager ports.ConsoleManagerInterface, state } } -func newSQLiteStore(stateDir string) (coreservices.RuntimeScrollStore, error) { - if stateDir == "" { - defaultStateDir, err := utils.DefaultRuntimeStateDir() - if err != nil { - return nil, err - } - stateDir = defaultStateDir - } - return coreservices.NewRuntimeStateStore(stateDir), nil -} - type dockerRuntimeStore struct { - coreservices.RuntimeScrollStore + ports.RuntimeScrollStore config docker.Config } diff --git a/internal/runtime/backend_factory_test.go b/internal/runtime/backend_factory_test.go index ce0de548..db9f0b5d 100644 --- a/internal/runtime/backend_factory_test.go +++ b/internal/runtime/backend_factory_test.go @@ -9,7 +9,6 @@ import ( "github.com/highcard-dev/daemon/internal/core/domain" "github.com/highcard-dev/daemon/internal/core/ports" - coreservices "github.com/highcard-dev/daemon/internal/core/services" "github.com/highcard-dev/daemon/internal/runtime/docker" runtimekubernetes "github.com/highcard-dev/daemon/internal/runtime/kubernetes" ) @@ -46,7 +45,7 @@ func TestNewRuntimeKubernetesOwnsStoreSelection(t *testing.T) { } return fakeBackend{name: "kubernetes"}, nil } - newKubernetesStateStore = func(config runtimekubernetes.Config) (coreservices.RuntimeScrollStore, error) { + newKubernetesStateStore = func(config runtimekubernetes.Config) (ports.RuntimeScrollStore, error) { 
if config.Namespace != "druid" { t.Fatalf("store namespace = %s, want druid", config.Namespace) } @@ -87,6 +86,13 @@ func (f fakeBackend) Name() string { return f.name } +func (f fakeBackend) RootRef(id string, namespace string) string { + if namespace != "" { + return namespace + "/" + id + } + return id +} + func (f fakeBackend) ReadScrollFile(root string) ([]byte, error) { return nil, nil } @@ -158,7 +164,7 @@ func (f fakeStore) ListScrolls() ([]*domain.RuntimeScroll, error) { } func (f fakeStore) GetScroll(id string) (*domain.RuntimeScroll, error) { - return nil, coreservices.ErrScrollNotFound + return nil, domain.ErrRuntimeScrollNotFound } func (f fakeStore) UpdateScroll(scroll *domain.RuntimeScroll) error { diff --git a/internal/runtime/docker/backend.go b/internal/runtime/docker/backend.go index ca8b1c82..dd1e4513 100644 --- a/internal/runtime/docker/backend.go +++ b/internal/runtime/docker/backend.go @@ -106,6 +106,14 @@ func (b *Backend) Name() string { return "docker" } +func (b *Backend) RootRef(id string, _ string) string { + root, err := b.config.RuntimeRootRef(id) + if err != nil { + return id + } + return root +} + func (b *Backend) ReadScrollFile(root string) ([]byte, error) { if root == "" { return nil, fmt.Errorf("runtime root is required") @@ -130,12 +138,12 @@ func (b *Backend) RunCommand(command ports.RuntimeCommand) (*int, error) { if procedure.Image == "" { return nil, fmt.Errorf("docker runtime procedure %s requires image", procedureName) } - if err := b.startPersistentContainer(runtimeConsoleID(command.ScrollID, procedureName), procedureName, procedure, command.Root, command.GlobalPorts, env); err != nil { + if err := b.startPersistentContainer(runtimeConsoleID(command.ScrollID, procedureName), procedureName, procedureResourceName(command.Name, idx), procedure, command.Root, command.GlobalPorts, env); err != nil { return nil, err } continue } - exitCode, err := b.runProcedure(runtimeConsoleID(command.ScrollID, procedureName), procedureName, 
procedure, command.Root, command.GlobalPorts, env) + exitCode, err := b.runProcedure(runtimeConsoleID(command.ScrollID, procedureName), procedureName, procedureResourceName(command.Name, idx), procedure, command.Root, command.GlobalPorts, env) if err != nil { return exitCode, err } @@ -149,14 +157,14 @@ func (b *Backend) RunCommand(command ports.RuntimeCommand) (*int, error) { return nil, nil } -func (b *Backend) runProcedure(consoleID string, procedureName string, procedure *domain.Procedure, root string, globalPorts []domain.Port, env map[string]string) (*int, error) { +func (b *Backend) runProcedure(consoleID string, procedureName string, resourceName string, procedure *domain.Procedure, root string, globalPorts []domain.Port, env map[string]string) (*int, error) { if procedure.IsSignal() { return nil, b.Signal(procedureName, procedure.Target, procedure.Signal, root) } if procedure.Image == "" { return nil, fmt.Errorf("docker runtime procedure %s requires image", procedureName) } - return b.runContainer(consoleID, procedureName, procedure, root, globalPorts, env) + return b.runContainer(consoleID, procedureName, resourceName, procedure, root, globalPorts, env) } func (b *Backend) ExpectedPorts(root string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimePortStatus, error) { @@ -217,6 +225,7 @@ func (b *Backend) RoutingTargets(root string, commands map[string]*domain.Comman continue } procedureName := domain.ProcedureName(commandName, idx, procedure) + serviceName := ContainerName(root, procedureResourceName(commandName, idx)) for _, expectedPort := range procedure.ExpectedPorts { if _, ok := seen[expectedPort.Name]; ok { continue @@ -232,7 +241,7 @@ func (b *Backend) RoutingTargets(root string, commands map[string]*domain.Comman PortName: expectedPort.Name, Port: port.Port, Protocol: normalizeProtocol(port.Protocol), - ServiceName: ContainerName(root, procedureName), + ServiceName: serviceName, ServicePort: 
port.Port, }) } @@ -752,7 +761,7 @@ func (b *Backend) SpawnPullWorker(ctx context.Context, action ports.RuntimeWorke return nil } -func (b *Backend) runContainer(consoleID string, commandName string, procedure *domain.Procedure, root string, globalPorts []domain.Port, env map[string]string) (*int, error) { +func (b *Backend) runContainer(consoleID string, commandName string, resourceName string, procedure *domain.Procedure, root string, globalPorts []domain.Port, env map[string]string) (*int, error) { ctx := context.Background() if procedure.Image == "" { return nil, errors.New("docker image is required") @@ -772,7 +781,7 @@ func (b *Backend) runContainer(consoleID string, commandName string, procedure * if err != nil { return nil, err } - containerName := ContainerName(root, commandName) + containerName := ContainerName(root, resourceName) _ = b.client.ContainerRemove(ctx, containerName, container.RemoveOptions{Force: true}) created, err := b.client.ContainerCreate(ctx, config, hostConfig, nil, nil, containerName) @@ -842,7 +851,7 @@ func (b *Backend) runContainer(consoleID string, commandName string, procedure * return &exitCode, nil } -func (b *Backend) startPersistentContainer(consoleID string, commandName string, procedure *domain.Procedure, root string, globalPorts []domain.Port, env map[string]string) error { +func (b *Backend) startPersistentContainer(consoleID string, commandName string, resourceName string, procedure *domain.Procedure, root string, globalPorts []domain.Port, env map[string]string) error { ctx := context.Background() if procedure.Image == "" { return errors.New("docker image is required") @@ -860,7 +869,7 @@ func (b *Backend) startPersistentContainer(consoleID string, commandName string, if err != nil { return err } - containerName := ContainerName(root, commandName) + containerName := ContainerName(root, resourceName) _ = b.client.ContainerRemove(ctx, containerName, container.RemoveOptions{Force: true}) created, err := 
b.client.ContainerCreate(ctx, config, hostConfig, nil, nil, containerName) if err != nil { @@ -1050,9 +1059,26 @@ func procedureDataSubPath(subPath string) string { } func ContainerName(root string, commandName string) string { - hash := sha1.Sum([]byte(root)) - name := sanitizeContainerName(commandName) - return fmt.Sprintf("druid-%s-%s", hex.EncodeToString(hash[:])[:10], name) + return sanitizeContainerName(fmt.Sprintf("%s-%s", runtimeID(root), commandName)) +} + +func runtimeID(root string) string { + switch { + case strings.HasPrefix(root, "docker-volume://"): + name := strings.TrimPrefix(root, "docker-volume://") + if strings.HasPrefix(name, "druid-") && strings.HasSuffix(name, "-data") { + return strings.TrimSuffix(strings.TrimPrefix(name, "druid-"), "-data") + } + return name + case strings.HasPrefix(root, "docker-bind://"): + return strings.TrimSuffix(filepath.Base(strings.TrimPrefix(root, "docker-bind://")), "-data") + default: + return strings.TrimSuffix(filepath.Base(root), "-data") + } +} + +func procedureResourceName(commandName string, procedureIndex int) string { + return fmt.Sprintf("%s-%d", commandName, procedureIndex) } func rootHash(root string) string { diff --git a/internal/runtime/docker/backend_names_test.go b/internal/runtime/docker/backend_names_test.go new file mode 100644 index 00000000..a24b7e12 --- /dev/null +++ b/internal/runtime/docker/backend_names_test.go @@ -0,0 +1,44 @@ +package docker + +import ( + "testing" + + "github.com/highcard-dev/daemon/internal/core/domain" +) + +func TestContainerNameUsesDeploymentCommandAndProcedureIndex(t *testing.T) { + root := "docker-volume://druid-0636a354-b3f4-4471-8749-3e17ab6c52-data" + + got := ContainerName(root, procedureResourceName("start", 0)) + want := "0636a354-b3f4-4471-8749-3e17ab6c52-start-0" + if got != want { + t.Fatalf("container name = %q, want %q", got, want) + } +} + +func TestRoutingTargetsUseFirstConcreteProcedureForSharedDockerPort(t *testing.T) { + root := 
"docker-volume://druid-minecraft-data" + coldstart := "coldstart" + start := "start" + + targets, err := (&Backend{}).RoutingTargets(root, map[string]*domain.CommandInstructionSet{ + "start": {Procedures: []*domain.Procedure{ + {Id: &coldstart, ExpectedPorts: []domain.ExpectedPort{{Name: "main"}}}, + {Id: &start, ExpectedPorts: []domain.ExpectedPort{{Name: "main"}}}, + }}, + }, []domain.Port{{Name: "main", Port: 25565, Protocol: "tcp"}}) + if err != nil { + t.Fatal(err) + } + + for _, target := range targets { + if target.Name != "main" { + continue + } + if target.Procedure != "coldstart" || target.ServiceName != ContainerName(root, "start-0") { + t.Fatalf("target = %#v", target) + } + return + } + t.Fatalf("main target missing: %#v", targets) +} diff --git a/internal/core/services/runtime_state_store.go b/internal/runtime/docker/state_store.go similarity index 86% rename from internal/core/services/runtime_state_store.go rename to internal/runtime/docker/state_store.go index fe5db9b6..6f1e5dbc 100644 --- a/internal/core/services/runtime_state_store.go +++ b/internal/runtime/docker/state_store.go @@ -1,4 +1,4 @@ -package services +package docker import ( "database/sql" @@ -10,42 +10,38 @@ import ( "time" "github.com/highcard-dev/daemon/internal/core/domain" + "github.com/highcard-dev/daemon/internal/utils" _ "modernc.org/sqlite" ) -var ErrScrollNotFound = errors.New("runtime scroll not found") - -type RuntimeStateStore struct { +type StateStore struct { stateDir string dbPath string } -type RuntimeScrollStore interface { - StateDir() string - Root(id string) string - CreateScroll(scroll *domain.RuntimeScroll) error - ListScrolls() ([]*domain.RuntimeScroll, error) - GetScroll(id string) (*domain.RuntimeScroll, error) - UpdateScroll(scroll *domain.RuntimeScroll) error - DeleteScroll(id string) error -} - -func NewRuntimeStateStore(stateDir string) *RuntimeStateStore { - return &RuntimeStateStore{ +func NewStateStore(stateDir string) (*StateStore, error) { + if 
stateDir == "" { + defaultStateDir, err := utils.DefaultRuntimeStateDir() + if err != nil { + return nil, err + } + stateDir = defaultStateDir + } + return &StateStore{ stateDir: stateDir, dbPath: filepath.Join(stateDir, "state.db"), - } + }, nil } -func (s *RuntimeStateStore) StateDir() string { +func (s *StateStore) StateDir() string { return s.stateDir } -func (s *RuntimeStateStore) Root(id string) string { +func (s *StateStore) Root(id string) string { return filepath.Join(s.stateDir, "scrolls", id) } -func (s *RuntimeStateStore) CreateScroll(scroll *domain.RuntimeScroll) error { +func (s *StateStore) CreateScroll(scroll *domain.RuntimeScroll) error { db, err := s.open() if err != nil { return err @@ -80,7 +76,7 @@ func (s *RuntimeStateStore) CreateScroll(scroll *domain.RuntimeScroll) error { return nil } -func (s *RuntimeStateStore) ListScrolls() ([]*domain.RuntimeScroll, error) { +func (s *StateStore) ListScrolls() ([]*domain.RuntimeScroll, error) { db, err := s.open() if err != nil { return nil, err @@ -108,7 +104,7 @@ func (s *RuntimeStateStore) ListScrolls() ([]*domain.RuntimeScroll, error) { return scrolls, rows.Err() } -func (s *RuntimeStateStore) GetScroll(id string) (*domain.RuntimeScroll, error) { +func (s *StateStore) GetScroll(id string) (*domain.RuntimeScroll, error) { db, err := s.open() if err != nil { return nil, err @@ -122,12 +118,12 @@ func (s *RuntimeStateStore) GetScroll(id string) (*domain.RuntimeScroll, error) `, id) scroll, err := scanRuntimeScroll(row) if errors.Is(err, sql.ErrNoRows) { - return nil, ErrScrollNotFound + return nil, domain.ErrRuntimeScrollNotFound } return scroll, err } -func (s *RuntimeStateStore) UpdateScroll(scroll *domain.RuntimeScroll) error { +func (s *StateStore) UpdateScroll(scroll *domain.RuntimeScroll) error { db, err := s.open() if err != nil { return err @@ -156,12 +152,12 @@ func (s *RuntimeStateStore) UpdateScroll(scroll *domain.RuntimeScroll) error { return err } if changed == 0 { - return 
ErrScrollNotFound + return domain.ErrRuntimeScrollNotFound } return nil } -func (s *RuntimeStateStore) DeleteScroll(id string) error { +func (s *StateStore) DeleteScroll(id string) error { db, err := s.open() if err != nil { return err @@ -177,12 +173,12 @@ func (s *RuntimeStateStore) DeleteScroll(id string) error { return err } if changed == 0 { - return ErrScrollNotFound + return domain.ErrRuntimeScrollNotFound } return nil } -func (s *RuntimeStateStore) open() (*sql.DB, error) { +func (s *StateStore) open() (*sql.DB, error) { if err := os.MkdirAll(s.stateDir, 0755); err != nil { return nil, err } diff --git a/internal/core/services/runtime_state_store_test.go b/internal/runtime/docker/state_store_test.go similarity index 73% rename from internal/core/services/runtime_state_store_test.go rename to internal/runtime/docker/state_store_test.go index 9d622634..a13e86df 100644 --- a/internal/core/services/runtime_state_store_test.go +++ b/internal/runtime/docker/state_store_test.go @@ -1,14 +1,17 @@ -package services_test +package docker import ( + "path/filepath" "testing" "github.com/highcard-dev/daemon/internal/core/domain" - "github.com/highcard-dev/daemon/internal/core/services" ) -func TestRuntimeStateStorePersistsCommandStatuses(t *testing.T) { - store := services.NewRuntimeStateStore(t.TempDir()) +func TestStateStorePersistsCommandStatuses(t *testing.T) { + store, err := NewStateStore(t.TempDir()) + if err != nil { + t.Fatal(err) + } exitCode := 2 scroll := &domain.RuntimeScroll{ ID: "test", @@ -56,3 +59,13 @@ func TestRuntimeStateStorePersistsCommandStatuses(t *testing.T) { t.Fatalf("scroll yaml = %q, want cached yaml", got.ScrollYAML) } } + +func TestStateStoreUsesSingleRuntimeRoot(t *testing.T) { + store, err := NewStateStore(t.TempDir()) + if err != nil { + t.Fatal(err) + } + if got, want := store.Root("scroll-a"), filepath.Join(store.StateDir(), "scrolls", "scroll-a"); got != want { + t.Fatalf("Root = %s, want %s", got, want) + } +} diff --git 
a/internal/runtime/kubernetes/backend.go b/internal/runtime/kubernetes/backend.go index 38600ee5..bb588b20 100644 --- a/internal/runtime/kubernetes/backend.go +++ b/internal/runtime/kubernetes/backend.go @@ -135,8 +135,16 @@ func (b *Backend) Name() string { return "kubernetes" } +func (b *Backend) RootRef(id string, namespace string) string { + if namespace == "" { + namespace = b.config.Namespace + } + return ref(namespace, dataPVCName(id)) +} + func (b *Backend) SpawnPullWorker(ctx context.Context, action ports.RuntimeWorkerAction) error { if err := b.config.ValidateForMaterialization(); err != nil { + logger.Log().Error("Kubernetes pull worker config invalid", zap.String("runtime_id", action.RuntimeID), zap.Error(err)) return err } if action.MountPath == "" { @@ -144,33 +152,66 @@ func (b *Backend) SpawnPullWorker(ctx context.Context, action ports.RuntimeWorke } namespace, pvc, err := parseRef(action.RootRef) if err != nil { + logger.Log().Error("Kubernetes pull worker root ref invalid", zap.String("runtime_id", action.RuntimeID), zap.String("root_ref", action.RootRef), zap.Error(err)) return err } + logger.Log().Info("Spawning Kubernetes pull worker", + zap.String("runtime_id", action.RuntimeID), + zap.String("mode", string(action.Mode)), + zap.String("namespace", namespace), + zap.String("pvc", pvc), + zap.String("artifact", action.Artifact), + ) + logger.Log().Debug("Kubernetes pull worker details", + zap.String("runtime_id", action.RuntimeID), + zap.String("root_ref", action.RootRef), + zap.String("mount_path", action.MountPath), + zap.String("pull_image", b.config.PullImage), + zap.Bool("registry_plain_http", b.config.RegistryPlainHTTP), + zap.Bool("has_registry_credentials", len(action.RegistryCredentials) > 0), + ) if action.Mode == ports.RuntimeWorkerModeCreate { - if err := b.ensurePVC(ctx, pvc); err != nil { + if err := b.ensurePVC(ctx, namespace, pvc); err != nil { + logger.Log().Error("Failed to ensure runtime PVC for pull worker", 
zap.String("runtime_id", action.RuntimeID), zap.String("namespace", namespace), zap.String("pvc", pvc), zap.Error(err)) return err } } registryConfigSecret, cleanupRegistryConfig, err := b.createRegistryConfigSecret(ctx, namespace, action.Artifact+action.RuntimeID, action.RegistryCredentials) if err != nil { + logger.Log().Error("Failed to create registry config secret for pull worker", zap.String("runtime_id", action.RuntimeID), zap.String("namespace", namespace), zap.Error(err)) return err } defer cleanupRegistryConfig() job := workerPullJobSpec(namespace, jobName("worker-pull", action.RootRef, shortHash(string(action.Mode)+action.Artifact)), pvc, b.config.PullImage, action, b.config.RegistrySecret, registryConfigSecret, b.config.RegistryPlainHTTP) - return b.runHelperJob(ctx, job) + logger.Log().Debug("Kubernetes pull worker job built", zap.String("runtime_id", action.RuntimeID), zap.String("namespace", namespace), zap.String("job", job.Name)) + if err := b.runHelperJob(ctx, job); err != nil { + logger.Log().Error("Kubernetes pull worker failed", zap.String("runtime_id", action.RuntimeID), zap.String("namespace", namespace), zap.String("job", job.Name), zap.Error(err)) + return err + } + logger.Log().Info("Kubernetes pull worker completed", zap.String("runtime_id", action.RuntimeID), zap.String("namespace", namespace), zap.String("job", job.Name)) + return nil } func (b *Backend) ReadScrollFile(root string) ([]byte, error) { namespace, pvc, err := parseRef(root) if err != nil { + logger.Log().Error("Cannot read scroll.yaml from invalid Kubernetes root", zap.String("root", root), zap.Error(err)) return nil, err } job := readScrollJobSpec(namespace, jobName("read", root, "scroll-yaml"), pvc, b.config.HelperImage) - return b.runJobAndLogs(context.Background(), job) + logger.Log().Debug("Reading scroll.yaml through Kubernetes helper job", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.String("job", job.Name)) + logs, err := 
b.runJobAndLogs(context.Background(), job) + if err != nil { + logger.Log().Error("Failed to read scroll.yaml through Kubernetes helper job", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.String("job", job.Name), zap.Error(err)) + return logs, err + } + logger.Log().Debug("Read scroll.yaml through Kubernetes helper job", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.String("job", job.Name), zap.Int("bytes", len(logs))) + return logs, nil } func (b *Backend) StartDev(ctx context.Context, action ports.RuntimeDevAction) error { if b.config.PullImage == "" { + logger.Log().Error("Cannot start Kubernetes dev server without pull image", zap.String("runtime_id", action.RuntimeID), zap.String("root_ref", action.RootRef)) return fmt.Errorf("kubernetes dev requires --k8s-pull-image or DRUID_K8S_PULL_IMAGE") } if action.MountPath == "" { @@ -181,112 +222,232 @@ func (b *Backend) StartDev(ctx context.Context, action ports.RuntimeDevAction) e } namespace, pvc, err := parseRef(action.RootRef) if err != nil { + logger.Log().Error("Kubernetes dev root ref invalid", zap.String("runtime_id", action.RuntimeID), zap.String("root_ref", action.RootRef), zap.Error(err)) return err } + logger.Log().Info("Starting Kubernetes dev server", + zap.String("runtime_id", action.RuntimeID), + zap.String("namespace", namespace), + zap.String("pvc", pvc), + zap.String("listen", action.Listen), + ) + logger.Log().Debug("Kubernetes dev server details", + zap.String("runtime_id", action.RuntimeID), + zap.String("root_ref", action.RootRef), + zap.String("mount_path", action.MountPath), + zap.Strings("watch_paths", action.WatchPaths), + zap.Strings("commands", action.HotReloadCommands), + zap.String("image", b.config.PullImage), + ) sts := devStatefulSetSpec(namespace, action.RootRef, pvc, b.config.PullImage, action, b.config.RegistrySecret) existing, err := b.client.AppsV1().StatefulSets(namespace).Get(ctx, sts.Name, metav1.GetOptions{}) switch { case 
apierrors.IsNotFound(err): + logger.Log().Info("Creating Kubernetes dev StatefulSet", zap.String("runtime_id", action.RuntimeID), zap.String("namespace", namespace), zap.String("statefulset", sts.Name)) if _, err := b.client.AppsV1().StatefulSets(namespace).Create(ctx, sts, metav1.CreateOptions{}); err != nil { + logger.Log().Error("Failed to create Kubernetes dev StatefulSet", zap.String("runtime_id", action.RuntimeID), zap.String("namespace", namespace), zap.String("statefulset", sts.Name), zap.Error(err)) return err } case err != nil: + logger.Log().Error("Failed to get Kubernetes dev StatefulSet", zap.String("runtime_id", action.RuntimeID), zap.String("namespace", namespace), zap.String("statefulset", sts.Name), zap.Error(err)) return err default: + logger.Log().Info("Updating Kubernetes dev StatefulSet", zap.String("runtime_id", action.RuntimeID), zap.String("namespace", namespace), zap.String("statefulset", sts.Name), zap.String("resource_version", existing.ResourceVersion)) sts.ResourceVersion = existing.ResourceVersion if _, err := b.client.AppsV1().StatefulSets(namespace).Update(ctx, sts, metav1.UpdateOptions{}); err != nil { + logger.Log().Error("Failed to update Kubernetes dev StatefulSet", zap.String("runtime_id", action.RuntimeID), zap.String("namespace", namespace), zap.String("statefulset", sts.Name), zap.Error(err)) return err } } service := devServiceSpec(namespace, action.RootRef, pvc) if err := b.reconcileService(ctx, service); err != nil { + logger.Log().Error("Failed to reconcile Kubernetes dev Service", zap.String("runtime_id", action.RuntimeID), zap.String("namespace", namespace), zap.String("service", service.Name), zap.Error(err)) return err } - return b.waitForStatefulSet(ctx, sts.Name) + if err := b.waitForStatefulSet(ctx, namespace, sts.Name); err != nil { + logger.Log().Error("Kubernetes dev StatefulSet did not become ready", zap.String("runtime_id", action.RuntimeID), zap.String("namespace", namespace), zap.String("statefulset", 
sts.Name), zap.Error(err)) + return err + } + logger.Log().Info("Kubernetes dev server ready", zap.String("runtime_id", action.RuntimeID), zap.String("namespace", namespace), zap.String("statefulset", sts.Name), zap.String("service", service.Name)) + return nil } func (b *Backend) StopDev(ctx context.Context, root string) error { namespace, _, err := parseRef(root) if err != nil { + logger.Log().Error("Cannot stop Kubernetes dev server for invalid root", zap.String("root", root), zap.Error(err)) return err } propagation := metav1.DeletePropagationBackground - _ = b.client.AppsV1().StatefulSets(namespace).Delete(ctx, devStatefulSetName(root), metav1.DeleteOptions{PropagationPolicy: &propagation}) - _ = b.client.CoreV1().Services(namespace).Delete(ctx, serviceName(root, "dev", "webdav"), metav1.DeleteOptions{}) + statefulSetName := devStatefulSetName(root) + serviceName := serviceName(root, "dev", "webdav") + logger.Log().Info("Stopping Kubernetes dev server", zap.String("namespace", namespace), zap.String("statefulset", statefulSetName), zap.String("service", serviceName)) + if err := b.client.AppsV1().StatefulSets(namespace).Delete(ctx, statefulSetName, metav1.DeleteOptions{PropagationPolicy: &propagation}); err != nil && !apierrors.IsNotFound(err) { + logger.Log().Warn("Failed to delete Kubernetes dev StatefulSet", zap.String("namespace", namespace), zap.String("statefulset", statefulSetName), zap.Error(err)) + } + if err := b.client.CoreV1().Services(namespace).Delete(ctx, serviceName, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + logger.Log().Warn("Failed to delete Kubernetes dev Service", zap.String("namespace", namespace), zap.String("service", serviceName), zap.Error(err)) + } return nil } func (b *Backend) reconcileService(ctx context.Context, service *corev1.Service) error { + logger.Log().Debug("Reconciling Kubernetes Service", zap.String("namespace", service.Namespace), zap.String("service", service.Name), zap.Any("selector", 
service.Spec.Selector), zap.Int("ports", len(service.Spec.Ports))) existing, err := b.client.CoreV1().Services(service.Namespace).Get(ctx, service.Name, metav1.GetOptions{}) switch { case apierrors.IsNotFound(err): + logger.Log().Info("Creating Kubernetes Service", zap.String("namespace", service.Namespace), zap.String("service", service.Name)) _, err := b.client.CoreV1().Services(service.Namespace).Create(ctx, service, metav1.CreateOptions{}) + if err != nil { + logger.Log().Error("Failed to create Kubernetes Service", zap.String("namespace", service.Namespace), zap.String("service", service.Name), zap.Error(err)) + } return err case err != nil: + logger.Log().Error("Failed to get Kubernetes Service", zap.String("namespace", service.Namespace), zap.String("service", service.Name), zap.Error(err)) return err } + logger.Log().Info("Updating Kubernetes Service", zap.String("namespace", service.Namespace), zap.String("service", service.Name), zap.String("resource_version", existing.ResourceVersion)) service.ResourceVersion = existing.ResourceVersion service.Spec.ClusterIP = existing.Spec.ClusterIP service.Spec.ClusterIPs = existing.Spec.ClusterIPs service.Spec.IPFamilies = existing.Spec.IPFamilies service.Spec.IPFamilyPolicy = existing.Spec.IPFamilyPolicy _, err = b.client.CoreV1().Services(service.Namespace).Update(ctx, service, metav1.UpdateOptions{}) + if err != nil { + logger.Log().Error("Failed to update Kubernetes Service", zap.String("namespace", service.Namespace), zap.String("service", service.Name), zap.Error(err)) + } return err } func (b *Backend) RunCommand(command ports.RuntimeCommand) (*int, error) { + if command.Command == nil { + err := fmt.Errorf("kubernetes command %s has no instruction set", command.Name) + logger.Log().Error("Cannot run Kubernetes command", zap.String("scroll_id", command.ScrollID), zap.String("command", command.Name), zap.Error(err)) + return nil, err + } + logger.Log().Info("Running Kubernetes command", + zap.String("scroll_id", 
command.ScrollID), + zap.String("command", command.Name), + zap.String("run_mode", string(command.Command.Run)), + zap.String("root", command.Root), + zap.Int("procedures", len(command.Command.Procedures)), + ) + portUse := expectedPortUse(command.Command) for idx, procedure := range command.Command.Procedures { + if procedure == nil { + logger.Log().Warn("Skipping nil Kubernetes procedure", zap.String("scroll_id", command.ScrollID), zap.String("command", command.Name), zap.Int("procedure_index", idx)) + continue + } procedureName := domain.ProcedureName(command.Name, idx, procedure) + resourceName := procedureResourceName(command.Root, command.Name, idx) env := command.ProcedureEnv[procedureName] if env == nil { env = procedure.Env } + logger.Log().Debug("Kubernetes procedure selected", + zap.String("scroll_id", command.ScrollID), + zap.String("command", command.Name), + zap.String("procedure", procedureName), + zap.String("resource", resourceName), + zap.String("run_mode", string(command.Command.Run)), + zap.String("image", procedure.Image), + zap.Bool("persistent", command.Command.Run == domain.RunModePersistent), + zap.Bool("signal", procedure.IsSignal()), + zap.Bool("ignore_failure", procedure.IgnoreFailure), + zap.Int("env_count", len(env)), + zap.Int("expected_ports", len(procedure.ExpectedPorts)), + zap.Int("mounts", len(procedure.Mounts)), + ) if command.Command.Run == domain.RunModePersistent { if procedure.IsSignal() { if err := b.Signal(procedureName, procedure.Target, procedure.Signal, command.Root); err != nil { + logger.Log().Error("Kubernetes signal procedure failed", zap.String("scroll_id", command.ScrollID), zap.String("command", command.Name), zap.String("procedure", procedureName), zap.String("target", procedure.Target), zap.String("signal", procedure.Signal), zap.Error(err)) return nil, err } + logger.Log().Info("Kubernetes signal procedure completed", zap.String("scroll_id", command.ScrollID), zap.String("command", command.Name), 
zap.String("procedure", procedureName), zap.String("target", procedure.Target), zap.String("signal", procedure.Signal)) continue } if procedure.Image == "" { - return nil, fmt.Errorf("kubernetes procedure %s requires image", procedureName) + err := fmt.Errorf("kubernetes procedure %s requires image", procedureName) + logger.Log().Error("Kubernetes persistent procedure missing image", zap.String("scroll_id", command.ScrollID), zap.String("command", command.Name), zap.String("procedure", procedureName), zap.Error(err)) + return nil, err } - if err := b.ensurePersistentProcedure(context.Background(), command.ScrollID, command.Root, procedureName, procedure, command.GlobalPorts, env); err != nil { + if err := b.ensurePersistentProcedure(context.Background(), command.ScrollID, command.Root, command.Name, procedureName, resourceName, procedure, command.GlobalPorts, env, portUse); err != nil { + logger.Log().Error("Kubernetes persistent procedure failed", zap.String("scroll_id", command.ScrollID), zap.String("command", command.Name), zap.String("procedure", procedureName), zap.Error(err)) return nil, err } continue } - exitCode, err := b.runJobProcedure(command.ScrollID, procedureName, procedure, command.Root, command.GlobalPorts, env) + exitCode, err := b.runJobProcedure(command.ScrollID, command.Name, procedureName, resourceName, procedure, command.Root, command.GlobalPorts, env, portUse) if err != nil { + logger.Log().Error("Kubernetes job procedure failed", zap.String("scroll_id", command.ScrollID), zap.String("command", command.Name), zap.String("procedure", procedureName), zap.Any("exit_code", exitCode), zap.Error(err)) return exitCode, err } if exitCode != nil && *exitCode != 0 { if procedure.IgnoreFailure { + logger.Log().Warn("Kubernetes job procedure failed but failure is ignored", zap.String("scroll_id", command.ScrollID), zap.String("command", command.Name), zap.String("procedure", procedureName), zap.Int("exit_code", *exitCode)) continue } + 
logger.Log().Warn("Kubernetes command stopped after non-zero procedure exit", zap.String("scroll_id", command.ScrollID), zap.String("command", command.Name), zap.String("procedure", procedureName), zap.Int("exit_code", *exitCode)) return exitCode, nil } + if exitCode != nil { + logger.Log().Info("Kubernetes job procedure completed", zap.String("scroll_id", command.ScrollID), zap.String("command", command.Name), zap.String("procedure", procedureName), zap.Int("exit_code", *exitCode)) + } } + logger.Log().Info("Kubernetes command completed", zap.String("scroll_id", command.ScrollID), zap.String("command", command.Name)) return nil, nil } -func (b *Backend) runJobProcedure(scrollID string, procedureName string, procedure *domain.Procedure, root string, globalPorts []domain.Port, env map[string]string) (*int, error) { +func (b *Backend) runJobProcedure(scrollID string, commandName string, procedureName string, resourceName string, procedure *domain.Procedure, root string, globalPorts []domain.Port, env map[string]string, portUse map[string]int) (*int, error) { if procedure.IsSignal() { - return nil, b.Signal(procedureName, procedure.Target, procedure.Signal, root) + logger.Log().Info("Running Kubernetes signal procedure", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("target", procedure.Target), zap.String("signal", procedure.Signal)) + if err := b.Signal(procedureName, procedure.Target, procedure.Signal, root); err != nil { + logger.Log().Error("Kubernetes signal procedure failed", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.Error(err)) + return nil, err + } + return nil, nil } if procedure.Image == "" { - return nil, fmt.Errorf("kubernetes procedure %s requires image", procedureName) + err := fmt.Errorf("kubernetes procedure %s requires image", procedureName) + logger.Log().Error("Kubernetes job procedure missing image", 
zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.Error(err)) + return nil, err } ctx := context.Background() - if err := b.ensureExpectedServices(ctx, root, procedureName, procedure, globalPorts); err != nil { + if err := b.ensureExpectedServices(ctx, root, commandName, procedureName, procedure, globalPorts, portUse); err != nil { + logger.Log().Error("Failed to reconcile Kubernetes procedure Services", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.Error(err)) return nil, err } - job, err := procedureJobSpec(b.config.Namespace, root, procedureName, procedure, env, b.config.RegistrySecret) + namespace, _, err := parseRef(root) if err != nil { + logger.Log().Error("Kubernetes job procedure root ref invalid", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("root", root), zap.Error(err)) return nil, err } + job, err := procedureJobSpec(namespace, root, commandName, procedureName, resourceName, procedure, env, b.config.RegistrySecret) + if err != nil { + logger.Log().Error("Failed to build Kubernetes procedure Job", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("namespace", namespace), zap.Error(err)) + return nil, err + } + logger.Log().Info("Starting Kubernetes job procedure", + zap.String("scroll_id", scrollID), + zap.String("command", commandName), + zap.String("procedure", procedureName), + zap.String("namespace", namespace), + zap.String("job", job.Name), + ) + logger.Log().Debug("Kubernetes job procedure details", + zap.String("scroll_id", scrollID), + zap.String("command", commandName), + zap.String("procedure", procedureName), + zap.String("resource", resourceName), + zap.String("image", procedure.Image), + zap.Int("env_count", len(env)), + zap.Int("expected_ports", 
len(procedure.ExpectedPorts)), + zap.Int("mounts", len(procedure.Mounts)), + ) createdJob, err := b.createFreshJob(ctx, job) if err != nil { + logger.Log().Error("Failed to create Kubernetes job procedure", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("namespace", namespace), zap.String("job", job.Name), zap.Error(err)) return nil, err } output := make(chan string, 100) @@ -296,12 +457,16 @@ func (b *Backend) runJobProcedure(scrollID string, procedureName string, procedu return b.attachToProcedure(root, procedureName, data) } streamStarted := false - podName, err := b.waitForJobPod(ctx, job.Name, string(createdJob.UID)) + jobName := createdJob.Name + podName, err := b.waitForJobPod(ctx, namespace, jobName, string(createdJob.UID)) if err == nil { streamStarted = true - go b.streamPodLogs(ctx, podName, output) + logger.Log().Debug("Streaming Kubernetes job procedure logs", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("namespace", namespace), zap.String("job", jobName), zap.String("pod", podName), zap.String("console_id", consoleID)) + go b.streamPodLogs(ctx, namespace, podName, output) + } else { + logger.Log().Warn("Could not find Kubernetes job pod before wait; console logs may be empty", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("namespace", namespace), zap.String("job", jobName), zap.Error(err)) } - exitCode, err := b.waitForJob(ctx, job.Name) + exitCode, err := b.waitForJob(ctx, namespace, jobName) if exitCode != nil { console.MarkExited(*exitCode) } @@ -310,30 +475,72 @@ func (b *Backend) runJobProcedure(scrollID string, procedureName string, procedu } <-doneChan if err != nil { + if exitCode != nil && *exitCode == 0 { + b.deleteFinishedJob(context.Background(), namespace, jobName) + } + if exitCode != nil && *exitCode != 0 { + 
logger.Log().Warn("Keeping failed Kubernetes job procedure for debugging", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("namespace", namespace), zap.String("job", jobName), zap.Int("exit_code", *exitCode)) + } + logger.Log().Error("Kubernetes job procedure ended with error", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("namespace", namespace), zap.String("job", jobName), zap.Any("exit_code", exitCode), zap.Error(err)) return exitCode, err } + if exitCode != nil && *exitCode == 0 { + b.deleteFinishedJob(context.Background(), namespace, jobName) + logger.Log().Info("Kubernetes job procedure exited", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("namespace", namespace), zap.String("job", jobName), zap.Int("exit_code", *exitCode)) + } else if exitCode != nil { + logger.Log().Warn("Keeping failed Kubernetes job procedure for debugging", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("namespace", namespace), zap.String("job", jobName), zap.Int("exit_code", *exitCode)) + } return exitCode, nil } -func (b *Backend) ensurePersistentProcedure(ctx context.Context, scrollID string, root string, procedureName string, procedure *domain.Procedure, globalPorts []domain.Port, env map[string]string) error { - if err := b.ensureExpectedServices(ctx, root, procedureName, procedure, globalPorts); err != nil { +func (b *Backend) ensurePersistentProcedure(ctx context.Context, scrollID string, root string, commandName string, procedureName string, resourceName string, procedure *domain.Procedure, globalPorts []domain.Port, env map[string]string, portUse map[string]int) error { + if err := b.ensureExpectedServices(ctx, root, commandName, procedureName, procedure, globalPorts, portUse); err 
!= nil { + logger.Log().Error("Failed to reconcile Kubernetes persistent procedure Services", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.Error(err)) return err } - statefulSet, err := procedureStatefulSetSpec(b.config.Namespace, root, procedureName, procedure, env, b.config.RegistrySecret) + namespace, _, err := parseRef(root) if err != nil { + logger.Log().Error("Kubernetes persistent procedure root ref invalid", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("root", root), zap.Error(err)) return err } - existing, err := b.client.AppsV1().StatefulSets(b.config.Namespace).Get(ctx, statefulSet.Name, metav1.GetOptions{}) + statefulSet, err := procedureStatefulSetSpec(namespace, root, commandName, procedureName, resourceName, procedure, env, b.config.RegistrySecret) + if err != nil { + logger.Log().Error("Failed to build Kubernetes persistent procedure StatefulSet", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("namespace", namespace), zap.Error(err)) + return err + } + logger.Log().Info("Reconciling Kubernetes persistent procedure", + zap.String("scroll_id", scrollID), + zap.String("command", commandName), + zap.String("procedure", procedureName), + zap.String("namespace", namespace), + zap.String("statefulset", statefulSet.Name), + ) + logger.Log().Debug("Kubernetes persistent procedure details", + zap.String("scroll_id", scrollID), + zap.String("command", commandName), + zap.String("procedure", procedureName), + zap.String("resource", resourceName), + zap.String("image", procedure.Image), + zap.Int("env_count", len(env)), + zap.Int("expected_ports", len(procedure.ExpectedPorts)), + zap.Int("mounts", len(procedure.Mounts)), + ) + existing, err := b.client.AppsV1().StatefulSets(namespace).Get(ctx, statefulSet.Name, metav1.GetOptions{}) switch { 
case apierrors.IsNotFound(err): - if _, err := b.client.AppsV1().StatefulSets(b.config.Namespace).Create(ctx, statefulSet, metav1.CreateOptions{}); err != nil { + logger.Log().Info("Creating Kubernetes persistent procedure StatefulSet", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("namespace", namespace), zap.String("statefulset", statefulSet.Name)) + if _, err := b.client.AppsV1().StatefulSets(namespace).Create(ctx, statefulSet, metav1.CreateOptions{}); err != nil { + logger.Log().Error("Failed to create Kubernetes persistent procedure StatefulSet", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("namespace", namespace), zap.String("statefulset", statefulSet.Name), zap.Error(err)) return err } case err != nil: + logger.Log().Error("Failed to get Kubernetes persistent procedure StatefulSet", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("namespace", namespace), zap.String("statefulset", statefulSet.Name), zap.Error(err)) return err default: + logger.Log().Info("Updating Kubernetes persistent procedure StatefulSet", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("namespace", namespace), zap.String("statefulset", statefulSet.Name), zap.String("resource_version", existing.ResourceVersion)) statefulSet.ResourceVersion = existing.ResourceVersion - if _, err := b.client.AppsV1().StatefulSets(b.config.Namespace).Update(ctx, statefulSet, metav1.UpdateOptions{}); err != nil { + if _, err := b.client.AppsV1().StatefulSets(namespace).Update(ctx, statefulSet, metav1.UpdateOptions{}); err != nil { + logger.Log().Error("Failed to update Kubernetes persistent procedure StatefulSet", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", 
procedureName), zap.String("namespace", namespace), zap.String("statefulset", statefulSet.Name), zap.Error(err)) return err } } @@ -342,27 +549,31 @@ func (b *Backend) ensurePersistentProcedure(ctx context.Context, scrollID string console.WriteInput = func(data string) error { return b.attachToProcedure(root, procedureName, data) } - if err := b.waitForStatefulSet(ctx, statefulSet.Name); err != nil { + if err := b.waitForStatefulSet(ctx, namespace, statefulSet.Name); err != nil { close(output) + logger.Log().Error("Kubernetes persistent procedure did not become ready", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("namespace", namespace), zap.String("statefulset", statefulSet.Name), zap.Error(err)) return err } + logger.Log().Info("Kubernetes persistent procedure ready", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("namespace", namespace), zap.String("statefulset", statefulSet.Name)) go func() { - podName, err := b.waitForPodBySelector(context.Background(), labels.SelectorFromSet(labels.Set{ + podName, err := b.waitForPodBySelector(context.Background(), namespace, labels.SelectorFromSet(labels.Set{ labelScrollID: statefulSet.Labels[labelScrollID], labelProcedure: statefulSet.Labels[labelProcedure], }).String()) if err != nil { + logger.Log().Warn("Failed to find Kubernetes persistent procedure pod for logs", zap.String("scroll_id", scrollID), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("namespace", namespace), zap.String("statefulset", statefulSet.Name), zap.Error(err)) output <- fmt.Sprintf("failed to find StatefulSet pod logs: %v", err) close(output) return } - b.streamPodLogs(context.Background(), podName, output) + logger.Log().Debug("Streaming Kubernetes persistent procedure logs", zap.String("scroll_id", scrollID), zap.String("command", commandName), 
zap.String("procedure", procedureName), zap.String("namespace", namespace), zap.String("pod", podName)) + b.streamPodLogs(context.Background(), namespace, podName, output) }() return nil } func (b *Backend) ExpectedPorts(root string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimePortStatus, error) { - _, pvc, err := parseRef(root) + namespace, pvc, err := parseRef(root) if err != nil { return nil, err } @@ -377,6 +588,7 @@ func (b *Backend) ExpectedPorts(root string, commands map[string]*domain.Command if command == nil { continue } + portUse := expectedPortUse(command) for idx, procedure := range command.Procedures { if procedure == nil || len(procedure.ExpectedPorts) == 0 { continue @@ -398,7 +610,8 @@ func (b *Backend) ExpectedPorts(root string, commands map[string]*domain.Command KeepAliveTraffic: expectedPort.KeepAliveTraffic, Source: "kubernetes-service", } - serviceReady, hostPort := b.serviceReady(context.Background(), serviceName(root, procedureName, expectedPort.Name)) + serviceProcedure := serviceProcedureName(commandName, procedureName, expectedPort.Name, portUse) + serviceReady, hostPort := b.serviceReady(context.Background(), namespace, serviceName(root, serviceProcedure, expectedPort.Name)) status.Bound = serviceReady status.HostPort = hostPort if !hubbleAvailable { @@ -416,7 +629,7 @@ func (b *Backend) ExpectedPorts(root string, commands map[string]*domain.Command status.TrafficWindow = threshold.Window.String() } traffic, err := b.hubble.HasFlow(context.Background(), TrafficQuery{ - Namespace: b.config.Namespace, + Namespace: namespace, ScrollID: pvc, ProcedureName: procedureName, Port: port, @@ -475,6 +688,7 @@ func (b *Backend) RoutingTargets(root string, commands map[string]*domain.Comman if command == nil { continue } + portUse := expectedPortUse(command) for idx, procedure := range command.Procedures { if procedure == nil || len(procedure.ExpectedPorts) == 0 { continue @@ -489,16 +703,7 @@ func 
(b *Backend) RoutingTargets(root string, commands map[string]*domain.Comman return nil, fmt.Errorf("expected port %s is not defined in top-level ports", expectedPort.Name) } seen[expectedPort.Name] = struct{}{} - selector := map[string]string{ - labelManagedBy: "druid", - labelComponent: "runtime", - labelScrollID: dnsLabel(pvc), - } - if len(procedure.ExpectedPorts) == 1 { - selector[labelPortName] = dnsLabel(expectedPort.Name) - } else { - selector[labelProcedure] = dnsLabel(procedureName) - } + serviceProcedure := serviceProcedureName(commandName, procedureName, expectedPort.Name, portUse) targets = append(targets, domain.RuntimeRoutingTarget{ Name: expectedPort.Name, Procedure: procedureName, @@ -506,9 +711,9 @@ func (b *Backend) RoutingTargets(root string, commands map[string]*domain.Comman Port: port.Port, Protocol: normalizeProtocol(port.Protocol), Namespace: namespace, - ServiceName: serviceName(root, procedureName, expectedPort.Name), + ServiceName: serviceName(root, serviceProcedure, expectedPort.Name), ServicePort: port.Port, - Selector: selector, + Selector: serviceSelector(pvc, commandName, procedureName, expectedPort.Name, portUse), }) } } @@ -517,36 +722,50 @@ func (b *Backend) RoutingTargets(root string, commands map[string]*domain.Comman } func (b *Backend) StopRuntime(root string) error { + logger.Log().Info("Stopping Kubernetes runtime", zap.String("root", root)) propagation := metav1.DeletePropagationBackground options := metav1.DeleteOptions{PropagationPolicy: &propagation} if err := b.deleteRuntimeJobs(context.Background(), root, options); err != nil { + logger.Log().Error("Failed to delete Kubernetes runtime jobs", zap.String("root", root), zap.Error(err)) return err } if err := b.deleteRuntimeStatefulSets(context.Background(), root, options); err != nil { + logger.Log().Error("Failed to delete Kubernetes runtime StatefulSets", zap.String("root", root), zap.Error(err)) + return err + } + if err := 
b.deleteRuntimePodsByScroll(context.Background(), root, options); err != nil { + logger.Log().Error("Failed to delete Kubernetes runtime pods", zap.String("root", root), zap.Error(err)) return err } - return b.deleteRuntimePodsByScroll(context.Background(), root, options) + logger.Log().Info("Stopped Kubernetes runtime", zap.String("root", root)) + return nil } func (b *Backend) DeleteRuntime(root string, purgeData bool) error { + logger.Log().Info("Deleting Kubernetes runtime", zap.String("root", root), zap.Bool("purge_data", purgeData)) propagation := metav1.DeletePropagationBackground options := metav1.DeleteOptions{PropagationPolicy: &propagation} if err := b.StopRuntime(root); err != nil { return err } if err := b.deleteRuntimeServices(context.Background(), root, options); err != nil { + logger.Log().Error("Failed to delete Kubernetes runtime Services", zap.String("root", root), zap.Error(err)) return err } if purgeData { namespace, pvc, err := parseRef(root) if err != nil { + logger.Log().Error("Cannot purge Kubernetes runtime data for invalid root", zap.String("root", root), zap.Error(err)) return err } + logger.Log().Info("Deleting Kubernetes runtime PVC", zap.String("namespace", namespace), zap.String("pvc", pvc)) err = b.client.CoreV1().PersistentVolumeClaims(namespace).Delete(context.Background(), pvc, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { + logger.Log().Error("Failed to delete Kubernetes runtime PVC", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.Error(err)) return err } } + logger.Log().Info("Deleted Kubernetes runtime", zap.String("root", root), zap.Bool("purge_data", purgeData)) return nil } @@ -559,15 +778,23 @@ func (b *Backend) BackupRuntime(ctx context.Context, root string, artifact strin } namespace, pvc, err := parseRef(root) if err != nil { + logger.Log().Error("Cannot backup Kubernetes runtime for invalid root", zap.String("root", root), zap.String("artifact", artifact), zap.Error(err)) return 
err } + logger.Log().Info("Backing up Kubernetes runtime", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.String("artifact", artifact)) registryConfigSecret, cleanupRegistryConfig, err := b.createRegistryConfigSecret(ctx, namespace, artifact+root, registryCredentials) if err != nil { + logger.Log().Error("Failed to create registry config secret for Kubernetes backup", zap.String("namespace", namespace), zap.String("artifact", artifact), zap.Error(err)) return err } defer cleanupRegistryConfig() job := backupJobSpec(namespace, jobName("backup", root, shortHash(artifact)), pvc, b.config.PullImage, artifact, b.config.RegistrySecret, registryConfigSecret, b.config.RegistryPlainHTTP) - return b.runHelperJob(ctx, job) + if err := b.runHelperJob(ctx, job); err != nil { + logger.Log().Error("Kubernetes runtime backup failed", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.String("artifact", artifact), zap.String("job", job.Name), zap.Error(err)) + return err + } + logger.Log().Info("Kubernetes runtime backup completed", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.String("artifact", artifact), zap.String("job", job.Name)) + return nil } func (b *Backend) RestoreRuntime(ctx context.Context, root string, artifact string, registryCredentials []domain.RegistryCredential) error { @@ -579,62 +806,85 @@ func (b *Backend) RestoreRuntime(ctx context.Context, root string, artifact stri } namespace, pvc, err := parseRef(root) if err != nil { + logger.Log().Error("Cannot restore Kubernetes runtime for invalid root", zap.String("root", root), zap.String("artifact", artifact), zap.Error(err)) return err } + logger.Log().Info("Restoring Kubernetes runtime", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.String("artifact", artifact)) stagePVC := stagingPVCName("restore:" + root + ":" + artifact) - if err := b.ensurePVC(ctx, stagePVC); err != nil { + if err := b.ensurePVC(ctx, namespace, stagePVC); err != nil { + 
logger.Log().Error("Failed to create Kubernetes restore staging PVC", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.String("stage_pvc", stagePVC), zap.Error(err)) return err } - defer b.client.CoreV1().PersistentVolumeClaims(namespace).Delete(context.Background(), stagePVC, metav1.DeleteOptions{}) + defer func() { + if err := b.client.CoreV1().PersistentVolumeClaims(namespace).Delete(context.Background(), stagePVC, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + logger.Log().Warn("Failed to delete Kubernetes restore staging PVC", zap.String("namespace", namespace), zap.String("stage_pvc", stagePVC), zap.Error(err)) + } + }() registryConfigSecret, cleanupRegistryConfig, err := b.createRegistryConfigSecret(ctx, namespace, artifact+root, registryCredentials) if err != nil { + logger.Log().Error("Failed to create registry config secret for Kubernetes restore", zap.String("namespace", namespace), zap.String("artifact", artifact), zap.Error(err)) return err } defer cleanupRegistryConfig() pullJob := pullJobSpec(namespace, jobName("restore-pull", ref(namespace, stagePVC), shortHash(artifact)), stagePVC, b.config.PullImage, artifact, b.config.RegistrySecret, registryConfigSecret, b.config.RegistryPlainHTTP) if err := b.runHelperJob(ctx, pullJob); err != nil { + logger.Log().Error("Kubernetes restore pull failed", zap.String("namespace", namespace), zap.String("artifact", artifact), zap.String("job", pullJob.Name), zap.Error(err)) return err } if err := b.StopRuntime(root); err != nil { + logger.Log().Error("Failed to stop Kubernetes runtime before restore", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.Error(err)) return err } restoreJob := replacePVCJobSpec(namespace, jobName("restore-copy", root, shortHash(artifact)), stagePVC, pvc, b.config.HelperImage) - return b.runHelperJob(ctx, restoreJob) + if err := b.runHelperJob(ctx, restoreJob); err != nil { + logger.Log().Error("Kubernetes restore copy failed", 
zap.String("namespace", namespace), zap.String("pvc", pvc), zap.String("stage_pvc", stagePVC), zap.String("job", restoreJob.Name), zap.Error(err)) + return err + } + logger.Log().Info("Kubernetes runtime restore completed", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.String("artifact", artifact)) + return nil } func (b *Backend) Attach(commandName string, data string) error { + logger.Log().Debug("Attaching to Kubernetes procedure by command name", zap.String("command", commandName), zap.Int("bytes", len(data))) pods, err := b.client.CoreV1().Pods(b.config.Namespace).List(context.Background(), metav1.ListOptions{ LabelSelector: labels.SelectorFromSet(labels.Set{labelProcedure: dnsLabel(commandName)}).String(), }) if err != nil { + logger.Log().Error("Failed to list Kubernetes pods for attach", zap.String("namespace", b.config.Namespace), zap.String("command", commandName), zap.Error(err)) return err } for _, pod := range pods.Items { if pod.Status.Phase == corev1.PodRunning { - return b.attachToPod(context.Background(), pod.Name, data) + logger.Log().Debug("Attaching to Kubernetes pod", zap.String("namespace", b.config.Namespace), zap.String("pod", pod.Name), zap.String("command", commandName), zap.Int("bytes", len(data))) + return b.attachToPod(context.Background(), b.config.Namespace, pod.Name, data) } } + logger.Log().Warn("No running Kubernetes pod found for attach", zap.String("namespace", b.config.Namespace), zap.String("command", commandName), zap.Int("pods", len(pods.Items))) return fmt.Errorf("no running pod found for console %s", commandName) } func (b *Backend) attachToProcedure(root string, procedureName string, data string) error { - _, pvc, err := parseRef(root) + namespace, pvc, err := parseRef(root) if err != nil { + logger.Log().Error("Cannot attach to Kubernetes procedure for invalid root", zap.String("root", root), zap.String("procedure", procedureName), zap.Error(err)) return err } selector := baseLabels(pvc) 
selector[labelProcedure] = dnsLabel(procedureName) - podName, err := b.waitForPodBySelector(context.Background(), labels.SelectorFromSet(selector).String()) + podName, err := b.waitForPodBySelector(context.Background(), namespace, labels.SelectorFromSet(selector).String()) if err != nil { + logger.Log().Error("Failed to find Kubernetes procedure pod for attach", zap.String("namespace", namespace), zap.String("procedure", procedureName), zap.Any("selector", selector), zap.Error(err)) return err } - return b.attachToPod(context.Background(), podName, data) + logger.Log().Debug("Attaching to Kubernetes procedure pod", zap.String("namespace", namespace), zap.String("pod", podName), zap.String("procedure", procedureName), zap.Int("bytes", len(data))) + return b.attachToPod(context.Background(), namespace, podName, data) } -func (b *Backend) attachToPod(ctx context.Context, podName string, data string) error { +func (b *Backend) attachToPod(ctx context.Context, namespace string, podName string, data string) error { + logger.Log().Debug("Opening Kubernetes pod attach stream", zap.String("namespace", namespace), zap.String("pod", podName), zap.Int("bytes", len(data))) req := b.client.CoreV1().RESTClient().Post(). Resource("pods"). - Namespace(b.config.Namespace). + Namespace(namespace). Name(podName). SubResource("attach"). 
VersionedParams(&corev1.PodAttachOptions{ @@ -646,17 +896,25 @@ func (b *Backend) attachToPod(ctx context.Context, podName string, data string) }, k8sscheme.ParameterCodec) exec, err := remotecommand.NewSPDYExecutor(b.restConfig, "POST", req.URL()) if err != nil { + logger.Log().Error("Failed to create Kubernetes pod attach executor", zap.String("namespace", namespace), zap.String("pod", podName), zap.Error(err)) return err } - return exec.StreamWithContext(ctx, remotecommand.StreamOptions{ + if err := exec.StreamWithContext(ctx, remotecommand.StreamOptions{ Stdin: strings.NewReader(data), - }) + }); err != nil { + logger.Log().Error("Kubernetes pod attach stream failed", zap.String("namespace", namespace), zap.String("pod", podName), zap.Error(err)) + return err + } + logger.Log().Debug("Kubernetes pod attach stream completed", zap.String("namespace", namespace), zap.String("pod", podName)) + return nil } func (b *Backend) Signal(_ string, target string, signal string, root string) error { if target == "" { + logger.Log().Warn("Ignoring Kubernetes signal with empty target", zap.String("root", root), zap.String("signal", signal)) return nil } + logger.Log().Info("Sending Kubernetes runtime signal", zap.String("root", root), zap.String("target", target), zap.String("signal", signal)) switch signal { case "", "SIGTERM", "TERM": propagation := metav1.DeletePropagationBackground @@ -666,18 +924,49 @@ func (b *Backend) Signal(_ string, target string, signal string, root string) er propagation := metav1.DeletePropagationBackground return b.deleteRuntimeWorkload(context.Background(), root, target, metav1.DeleteOptions{GracePeriodSeconds: &grace, PropagationPolicy: &propagation}) default: + logger.Log().Error("Unsupported Kubernetes signal", zap.String("root", root), zap.String("target", target), zap.String("signal", signal)) return fmt.Errorf("kubernetes signal %s is unsupported without pod exec", signal) } } func (b *Backend) deleteRuntimeWorkload(ctx context.Context, 
root string, target string, options metav1.DeleteOptions) error { - jobErr := b.client.BatchV1().Jobs(b.config.Namespace).Delete(ctx, jobName("proc", root, target), options) - if apierrors.IsNotFound(jobErr) { - jobErr = nil + namespace, pvc, err := parseRef(root) + if err != nil { + logger.Log().Error("Cannot delete Kubernetes runtime workload for invalid root", zap.String("root", root), zap.String("target", target), zap.Error(err)) + return err } - statefulSetErr := b.client.AppsV1().StatefulSets(b.config.Namespace).Delete(ctx, statefulSetName(root, target), options) - if apierrors.IsNotFound(statefulSetErr) { - statefulSetErr = nil + selector := labels.SelectorFromSet(labels.Set{ + labelScrollID: dnsLabel(pvc), + labelProcedure: dnsLabel(target), + }).String() + logger.Log().Info("Deleting Kubernetes runtime workload", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.String("target", target), zap.String("selector", selector)) + jobs, jobErr := b.client.BatchV1().Jobs(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) + if jobErr == nil { + logger.Log().Debug("Deleting Kubernetes workload jobs", zap.String("namespace", namespace), zap.String("target", target), zap.Int("jobs", len(jobs.Items))) + for _, job := range jobs.Items { + if err := b.client.BatchV1().Jobs(namespace).Delete(ctx, job.Name, options); err != nil && !apierrors.IsNotFound(err) { + logger.Log().Error("Failed to delete Kubernetes workload job", zap.String("namespace", namespace), zap.String("job", job.Name), zap.String("target", target), zap.Error(err)) + jobErr = err + break + } + logger.Log().Debug("Deleted Kubernetes workload job", zap.String("namespace", namespace), zap.String("job", job.Name), zap.String("target", target)) + } + } else { + logger.Log().Error("Failed to list Kubernetes workload jobs", zap.String("namespace", namespace), zap.String("target", target), zap.Error(jobErr)) + } + statefulSets, statefulSetErr := 
b.client.AppsV1().StatefulSets(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) + if statefulSetErr == nil { + logger.Log().Debug("Deleting Kubernetes workload StatefulSets", zap.String("namespace", namespace), zap.String("target", target), zap.Int("statefulsets", len(statefulSets.Items))) + for _, statefulSet := range statefulSets.Items { + if err := b.client.AppsV1().StatefulSets(namespace).Delete(ctx, statefulSet.Name, options); err != nil && !apierrors.IsNotFound(err) { + logger.Log().Error("Failed to delete Kubernetes workload StatefulSet", zap.String("namespace", namespace), zap.String("statefulset", statefulSet.Name), zap.String("target", target), zap.Error(err)) + statefulSetErr = err + break + } + logger.Log().Debug("Deleted Kubernetes workload StatefulSet", zap.String("namespace", namespace), zap.String("statefulset", statefulSet.Name), zap.String("target", target)) + } + } else { + logger.Log().Error("Failed to list Kubernetes workload StatefulSets", zap.String("namespace", namespace), zap.String("target", target), zap.Error(statefulSetErr)) } podErr := b.deleteRuntimePods(ctx, root, target, options) if jobErr != nil { @@ -690,29 +979,38 @@ func (b *Backend) deleteRuntimeWorkload(ctx context.Context, root string, target } func (b *Backend) deleteRuntimePods(ctx context.Context, root string, target string, options metav1.DeleteOptions) error { - _, pvc, err := parseRef(root) + namespace, pvc, err := parseRef(root) if err != nil { + logger.Log().Error("Cannot delete Kubernetes runtime pods for invalid root", zap.String("root", root), zap.String("target", target), zap.Error(err)) return err } selector := labels.SelectorFromSet(labels.Set{ labelScrollID: dnsLabel(pvc), labelProcedure: dnsLabel(target), }).String() - pods, err := b.client.CoreV1().Pods(b.config.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) + pods, err := b.client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) if err != nil 
{ + logger.Log().Error("Failed to list Kubernetes runtime pods", zap.String("namespace", namespace), zap.String("target", target), zap.String("selector", selector), zap.Error(err)) return err } + logger.Log().Debug("Deleting Kubernetes runtime pods", zap.String("namespace", namespace), zap.String("target", target), zap.String("selector", selector), zap.Int("pods", len(pods.Items))) for _, pod := range pods.Items { - if err := b.client.CoreV1().Pods(b.config.Namespace).Delete(ctx, pod.Name, options); err != nil && !apierrors.IsNotFound(err) { + if err := b.client.CoreV1().Pods(namespace).Delete(ctx, pod.Name, options); err != nil && !apierrors.IsNotFound(err) { + logger.Log().Error("Failed to delete Kubernetes runtime pod", zap.String("namespace", namespace), zap.String("pod", pod.Name), zap.String("target", target), zap.Error(err)) return err } + logger.Log().Debug("Deleted Kubernetes runtime pod", zap.String("namespace", namespace), zap.String("pod", pod.Name), zap.String("target", target)) } return nil } func (b *Backend) deleteRuntimeJobs(ctx context.Context, root string, options metav1.DeleteOptions) error { + namespace, _, err := parseRef(root) + if err != nil { + return err + } return b.deleteRuntimeObjects(ctx, root, func(name string) error { - err := b.client.BatchV1().Jobs(b.config.Namespace).Delete(ctx, name, options) + err := b.client.BatchV1().Jobs(namespace).Delete(ctx, name, options) if apierrors.IsNotFound(err) { return nil } @@ -721,8 +1019,12 @@ func (b *Backend) deleteRuntimeJobs(ctx context.Context, root string, options me } func (b *Backend) deleteRuntimeStatefulSets(ctx context.Context, root string, options metav1.DeleteOptions) error { + namespace, _, err := parseRef(root) + if err != nil { + return err + } return b.deleteRuntimeObjects(ctx, root, func(name string) error { - err := b.client.AppsV1().StatefulSets(b.config.Namespace).Delete(ctx, name, options) + err := b.client.AppsV1().StatefulSets(namespace).Delete(ctx, name, options) if 
apierrors.IsNotFound(err) { return nil } @@ -731,8 +1033,12 @@ func (b *Backend) deleteRuntimeStatefulSets(ctx context.Context, root string, op } func (b *Backend) deleteRuntimeServices(ctx context.Context, root string, options metav1.DeleteOptions) error { + namespace, _, err := parseRef(root) + if err != nil { + return err + } return b.deleteRuntimeObjects(ctx, root, func(name string) error { - err := b.client.CoreV1().Services(b.config.Namespace).Delete(ctx, name, options) + err := b.client.CoreV1().Services(namespace).Delete(ctx, name, options) if apierrors.IsNotFound(err) { return nil } @@ -741,88 +1047,116 @@ func (b *Backend) deleteRuntimeServices(ctx context.Context, root string, option } func (b *Backend) deleteRuntimeObjects(ctx context.Context, root string, deleteOne func(name string) error, kind string) error { - _, pvc, err := parseRef(root) + namespace, pvc, err := parseRef(root) if err != nil { + logger.Log().Error("Cannot delete Kubernetes runtime objects for invalid root", zap.String("root", root), zap.String("kind", kind), zap.Error(err)) return err } selector := labels.SelectorFromSet(labels.Set{ labelScrollID: dnsLabel(pvc), }).String() + logger.Log().Debug("Deleting Kubernetes runtime objects", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.String("kind", kind), zap.String("selector", selector)) switch kind { case "jobs": - items, err := b.client.BatchV1().Jobs(b.config.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) + items, err := b.client.BatchV1().Jobs(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) if err != nil { + logger.Log().Error("Failed to list Kubernetes runtime jobs", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.Error(err)) return err } + logger.Log().Debug("Listed Kubernetes runtime jobs", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.Int("jobs", len(items.Items))) for _, item := range items.Items { if err := deleteOne(item.Name); err != nil 
{ + logger.Log().Error("Failed to delete Kubernetes runtime job", zap.String("namespace", namespace), zap.String("job", item.Name), zap.Error(err)) return err } + logger.Log().Debug("Deleted Kubernetes runtime job", zap.String("namespace", namespace), zap.String("job", item.Name)) } case "statefulsets": - items, err := b.client.AppsV1().StatefulSets(b.config.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) + items, err := b.client.AppsV1().StatefulSets(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) if err != nil { + logger.Log().Error("Failed to list Kubernetes runtime StatefulSets", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.Error(err)) return err } + logger.Log().Debug("Listed Kubernetes runtime StatefulSets", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.Int("statefulsets", len(items.Items))) for _, item := range items.Items { if err := deleteOne(item.Name); err != nil { + logger.Log().Error("Failed to delete Kubernetes runtime StatefulSet", zap.String("namespace", namespace), zap.String("statefulset", item.Name), zap.Error(err)) return err } + logger.Log().Debug("Deleted Kubernetes runtime StatefulSet", zap.String("namespace", namespace), zap.String("statefulset", item.Name)) } case "services": - items, err := b.client.CoreV1().Services(b.config.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) + items, err := b.client.CoreV1().Services(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) if err != nil { + logger.Log().Error("Failed to list Kubernetes runtime Services", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.Error(err)) return err } + logger.Log().Debug("Listed Kubernetes runtime Services", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.Int("services", len(items.Items))) for _, item := range items.Items { if err := deleteOne(item.Name); err != nil { + logger.Log().Error("Failed to delete Kubernetes runtime Service", 
zap.String("namespace", namespace), zap.String("service", item.Name), zap.Error(err)) return err } + logger.Log().Debug("Deleted Kubernetes runtime Service", zap.String("namespace", namespace), zap.String("service", item.Name)) } } return nil } func (b *Backend) deleteRuntimePodsByScroll(ctx context.Context, root string, options metav1.DeleteOptions) error { - _, pvc, err := parseRef(root) + namespace, pvc, err := parseRef(root) if err != nil { return err } selector := labels.SelectorFromSet(labels.Set{ labelScrollID: dnsLabel(pvc), }).String() - pods, err := b.client.CoreV1().Pods(b.config.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) + pods, err := b.client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) if err != nil { + logger.Log().Error("Failed to list Kubernetes runtime pods by scroll", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.String("selector", selector), zap.Error(err)) return err } + logger.Log().Debug("Deleting Kubernetes runtime pods by scroll", zap.String("namespace", namespace), zap.String("pvc", pvc), zap.String("selector", selector), zap.Int("pods", len(pods.Items))) for _, pod := range pods.Items { - if err := b.client.CoreV1().Pods(b.config.Namespace).Delete(ctx, pod.Name, options); err != nil && !apierrors.IsNotFound(err) { + if err := b.client.CoreV1().Pods(namespace).Delete(ctx, pod.Name, options); err != nil && !apierrors.IsNotFound(err) { + logger.Log().Error("Failed to delete Kubernetes runtime pod by scroll", zap.String("namespace", namespace), zap.String("pod", pod.Name), zap.Error(err)) return err } + logger.Log().Debug("Deleted Kubernetes runtime pod by scroll", zap.String("namespace", namespace), zap.String("pod", pod.Name)) } return nil } -func (b *Backend) ensurePVC(ctx context.Context, name string) error { - pvc := pvcSpec(b.config.Namespace, name, b.config.StorageClass) - _, err := b.client.CoreV1().PersistentVolumeClaims(b.config.Namespace).Create(ctx, pvc, 
metav1.CreateOptions{}) +func (b *Backend) ensurePVC(ctx context.Context, namespace string, name string) error { + pvc := pvcSpec(namespace, name, b.config.StorageClass) + logger.Log().Debug("Ensuring Kubernetes PVC", zap.String("namespace", namespace), zap.String("pvc", name), zap.String("storage_class", b.config.StorageClass)) + _, err := b.client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvc, metav1.CreateOptions{}) if apierrors.IsAlreadyExists(err) { + logger.Log().Debug("Kubernetes PVC already exists", zap.String("namespace", namespace), zap.String("pvc", name)) return nil } + if err != nil { + logger.Log().Error("Failed to create Kubernetes PVC", zap.String("namespace", namespace), zap.String("pvc", name), zap.Error(err)) + return err + } + logger.Log().Info("Created Kubernetes PVC", zap.String("namespace", namespace), zap.String("pvc", name)) return err } func (b *Backend) createRegistryConfigSecret(ctx context.Context, namespace string, seed string, credentials []domain.RegistryCredential) (string, func(), error) { if len(credentials) == 0 { + logger.Log().Debug("No registry credentials supplied; skipping Kubernetes registry config secret", zap.String("namespace", namespace)) return "", func() {}, nil } data, err := json.Marshal(struct { Registries []domain.RegistryCredential `json:"registries"` }{Registries: credentials}) if err != nil { + logger.Log().Error("Failed to marshal registry credentials for Kubernetes secret", zap.String("namespace", namespace), zap.Int("registries", len(credentials)), zap.Error(err)) return "", nil, err } name := dnsLabel("druid-registry-" + shortHash(fmt.Sprintf("%s-%d", seed, time.Now().UnixNano()))) + logger.Log().Debug("Creating Kubernetes registry config secret", zap.String("namespace", namespace), zap.String("secret", name), zap.Int("registries", len(credentials))) secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -836,43 +1170,74 @@ func (b *Backend) createRegistryConfigSecret(ctx 
context.Context, namespace stri Data: map[string][]byte{registryConfigSecretKey: data}, } if _, err := b.client.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}); err != nil { + logger.Log().Error("Failed to create Kubernetes registry config secret", zap.String("namespace", namespace), zap.String("secret", name), zap.Error(err)) return "", nil, err } + logger.Log().Info("Created Kubernetes registry config secret", zap.String("namespace", namespace), zap.String("secret", name), zap.Int("registries", len(credentials))) cleanup := func() { deleteCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - _ = b.client.CoreV1().Secrets(namespace).Delete(deleteCtx, name, metav1.DeleteOptions{}) + if err := b.client.CoreV1().Secrets(namespace).Delete(deleteCtx, name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + logger.Log().Warn("Failed to delete Kubernetes registry config secret", zap.String("namespace", namespace), zap.String("secret", name), zap.Error(err)) + return + } + logger.Log().Debug("Deleted Kubernetes registry config secret", zap.String("namespace", namespace), zap.String("secret", name)) } return name, cleanup, nil } func (b *Backend) runHelperJob(ctx context.Context, job *batchv1.Job) error { _, err := b.runJobAndLogs(ctx, job) + if err != nil { + logger.Log().Error("Kubernetes helper job failed", zap.String("namespace", job.Namespace), zap.String("job", job.Name), zap.Error(err)) + } return err } func (b *Backend) runJobAndLogs(ctx context.Context, job *batchv1.Job) ([]byte, error) { if b.jobLogRunner != nil { + logger.Log().Debug("Running Kubernetes job through test log runner", zap.String("namespace", job.Namespace), zap.String("job", job.Name)) return b.jobLogRunner(ctx, job) } + logger.Log().Info("Starting Kubernetes helper job", zap.String("namespace", job.Namespace), zap.String("job", job.Name)) + logger.Log().Debug("Kubernetes helper job details", zap.String("namespace", 
job.Namespace), zap.String("job", job.Name), zap.String("service_account", job.Spec.Template.Spec.ServiceAccountName), zap.Int("containers", len(job.Spec.Template.Spec.Containers)), zap.Int("init_containers", len(job.Spec.Template.Spec.InitContainers))) createdJob, err := b.createFreshJob(ctx, job) if err != nil { + logger.Log().Error("Failed to create Kubernetes helper job", zap.String("namespace", job.Namespace), zap.String("job", job.Name), zap.Error(err)) return nil, err } - podName, err := b.waitForJobPod(ctx, job.Name, string(createdJob.UID)) + jobName := createdJob.Name + podName, err := b.waitForJobPod(ctx, job.Namespace, jobName, string(createdJob.UID)) if err != nil { + logger.Log().Error("Failed to find Kubernetes helper job pod", zap.String("namespace", job.Namespace), zap.String("job", jobName), zap.String("uid", string(createdJob.UID)), zap.Error(err)) return nil, err } - exitCode, waitErr := b.waitForJob(ctx, job.Name) - logs, logErr := b.podLogs(ctx, podName) + logger.Log().Debug("Kubernetes helper job pod found", zap.String("namespace", job.Namespace), zap.String("job", jobName), zap.String("pod", podName)) + exitCode, waitErr := b.waitForJob(ctx, job.Namespace, jobName) + logs, logErr := b.podLogs(ctx, job.Namespace, podName) + if logErr != nil { + logger.Log().Warn("Failed to collect Kubernetes helper job logs", zap.String("namespace", job.Namespace), zap.String("job", jobName), zap.String("pod", podName), zap.Error(logErr)) + } else { + logger.Log().Debug("Collected Kubernetes helper job logs", zap.String("namespace", job.Namespace), zap.String("job", jobName), zap.String("pod", podName), zap.Int("bytes", len(logs))) + } + if exitCode != nil && *exitCode == 0 { + b.deleteFinishedJob(context.Background(), job.Namespace, jobName) + } else if exitCode != nil { + logger.Log().Warn("Keeping failed Kubernetes helper job for debugging", zap.String("namespace", job.Namespace), zap.String("job", jobName), zap.Int("exit_code", *exitCode)) + } if logErr != 
nil && waitErr == nil { waitErr = logErr } if waitErr != nil { + logger.Log().Error("Kubernetes helper job wait failed", zap.String("namespace", job.Namespace), zap.String("job", jobName), zap.Any("exit_code", exitCode), zap.Error(waitErr)) return logs, waitErr } if exitCode != nil && *exitCode != 0 { - return logs, fmt.Errorf("job %s exited with code %d", job.Name, *exitCode) + logger.Log().Error("Kubernetes helper job exited non-zero", zap.String("namespace", job.Namespace), zap.String("job", jobName), zap.Int("exit_code", *exitCode)) + return logs, fmt.Errorf("job %s exited with code %d", jobName, *exitCode) + } + if exitCode != nil { + logger.Log().Info("Kubernetes helper job completed", zap.String("namespace", job.Namespace), zap.String("job", jobName), zap.Int("exit_code", *exitCode)) } return logs, nil } @@ -881,7 +1246,20 @@ func (b *Backend) createFreshJob(ctx context.Context, job *batchv1.Job) (*batchv propagation := metav1.DeletePropagationBackground deleteCtx, cancelDelete := context.WithTimeout(ctx, 30*time.Second) defer cancelDelete() + existing, err := b.client.BatchV1().Jobs(job.Namespace).Get(deleteCtx, job.Name, metav1.GetOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + logger.Log().Error("Failed to check Kubernetes job before create", zap.String("namespace", job.Namespace), zap.String("job", job.Name), zap.Error(err)) + return nil, err + } + if existing != nil && kubernetesJobFailed(existing) { + original := job.Name + job = job.DeepCopy() + job.Name = dnsLabel(fmt.Sprintf("%s-%s", original, shortHash(fmt.Sprintf("%s-%d", original, time.Now().UnixNano())))) + logger.Log().Warn("Retaining failed Kubernetes job and creating retry job", zap.String("namespace", job.Namespace), zap.String("failed_job", original), zap.String("retry_job", job.Name)) + } + logger.Log().Debug("Deleting stale Kubernetes job before create", zap.String("namespace", job.Namespace), zap.String("job", job.Name)) if err := 
b.client.BatchV1().Jobs(job.Namespace).Delete(deleteCtx, job.Name, metav1.DeleteOptions{PropagationPolicy: &propagation}); err != nil && !apierrors.IsNotFound(err) { + logger.Log().Error("Failed to delete stale Kubernetes job before create", zap.String("namespace", job.Namespace), zap.String("job", job.Name), zap.Error(err)) return nil, err } for { @@ -890,52 +1268,86 @@ func (b *Backend) createFreshJob(ctx context.Context, job *batchv1.Job) (*batchv break } if err != nil { + logger.Log().Error("Failed to check stale Kubernetes job before create", zap.String("namespace", job.Namespace), zap.String("job", job.Name), zap.Error(err)) return nil, err } + logger.Log().Debug("Waiting for stale Kubernetes job deletion", zap.String("namespace", job.Namespace), zap.String("job", job.Name)) select { case <-deleteCtx.Done(): + logger.Log().Error("Timed out waiting for stale Kubernetes job deletion", zap.String("namespace", job.Namespace), zap.String("job", job.Name), zap.Error(deleteCtx.Err())) return nil, deleteCtx.Err() case <-time.After(250 * time.Millisecond): } } - return b.client.BatchV1().Jobs(job.Namespace).Create(ctx, job, metav1.CreateOptions{}) + createdJob, err := b.client.BatchV1().Jobs(job.Namespace).Create(ctx, job, metav1.CreateOptions{}) + if err != nil { + logger.Log().Error("Failed to create Kubernetes job", zap.String("namespace", job.Namespace), zap.String("job", job.Name), zap.Error(err)) + return nil, err + } + logger.Log().Info("Created Kubernetes job", zap.String("namespace", job.Namespace), zap.String("job", createdJob.Name), zap.String("uid", string(createdJob.UID))) + return createdJob, nil +} + +func (b *Backend) deleteFinishedJob(ctx context.Context, namespace string, name string) { + propagation := metav1.DeletePropagationBackground + deleteCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + logger.Log().Debug("Deleting finished Kubernetes job", zap.String("namespace", namespace), zap.String("job", name)) + err := 
b.client.BatchV1().Jobs(namespace).Delete(deleteCtx, name, metav1.DeleteOptions{PropagationPolicy: &propagation}) + if apierrors.IsNotFound(err) { + logger.Log().Debug("Finished Kubernetes job was already absent", zap.String("namespace", namespace), zap.String("job", name)) + return + } + if err != nil && !apierrors.IsNotFound(err) { + logger.Log().Warn("Failed to delete finished Kubernetes job", zap.String("namespace", namespace), zap.String("job", name), zap.Error(err)) + return + } + logger.Log().Info("Deleted finished Kubernetes job", zap.String("namespace", namespace), zap.String("job", name)) } -func (b *Backend) waitForJobPod(ctx context.Context, jobName string, controllerUID string) (string, error) { +func (b *Backend) waitForJobPod(ctx context.Context, namespace string, jobName string, controllerUID string) (string, error) { matchLabels := labels.Set{"job-name": jobName} if controllerUID != "" { matchLabels["controller-uid"] = controllerUID } selector := labels.SelectorFromSet(matchLabels).String() - return b.waitForPodBySelector(ctx, selector) + logger.Log().Debug("Waiting for Kubernetes job pod", zap.String("namespace", namespace), zap.String("job", jobName), zap.String("selector", selector), zap.String("controller_uid", controllerUID)) + return b.waitForPodBySelector(ctx, namespace, selector) } -func (b *Backend) waitForPodBySelector(ctx context.Context, selector string) (string, error) { +func (b *Backend) waitForPodBySelector(ctx context.Context, namespace string, selector string) (string, error) { deadline := time.Now().Add(2 * time.Minute) backoff := newCappedBackoff(podPollInitial, podPollMax) for { - pods, err := b.client.CoreV1().Pods(b.config.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) + pods, err := b.client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) if err != nil { + logger.Log().Error("Failed to list Kubernetes pods while waiting", zap.String("namespace", namespace), 
zap.String("selector", selector), zap.Error(err)) return "", err } if len(pods.Items) > 0 { + logger.Log().Debug("Kubernetes pod matched selector", zap.String("namespace", namespace), zap.String("selector", selector), zap.String("pod", pods.Items[0].Name), zap.Int("matches", len(pods.Items))) return pods.Items[0].Name, nil } if time.Now().After(deadline) { + logger.Log().Error("Timed out waiting for Kubernetes pod", zap.String("namespace", namespace), zap.String("selector", selector)) return "", fmt.Errorf("timed out waiting for pod matching selector %s", selector) } - if err := sleepUntilNextPoll(ctx, deadline, backoff.Next()); err != nil { + sleep := backoff.Next() + logger.Log().Debug("No Kubernetes pod matched yet", zap.String("namespace", namespace), zap.String("selector", selector), zap.Duration("sleep", sleep), zap.Time("deadline", deadline)) + if err := sleepUntilNextPoll(ctx, deadline, sleep); err != nil { + logger.Log().Warn("Stopped waiting for Kubernetes pod", zap.String("namespace", namespace), zap.String("selector", selector), zap.Error(err)) return "", err } } } -func (b *Backend) waitForStatefulSet(ctx context.Context, name string) error { +func (b *Backend) waitForStatefulSet(ctx context.Context, namespace string, name string) error { deadline := time.Now().Add(5 * time.Minute) backoff := newCappedBackoff(statefulSetPollInitial, statefulSetPollMax) for { - statefulSet, err := b.client.AppsV1().StatefulSets(b.config.Namespace).Get(ctx, name, metav1.GetOptions{}) + statefulSet, err := b.client.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { + logger.Log().Error("Failed to get Kubernetes StatefulSet while waiting", zap.String("namespace", namespace), zap.String("statefulset", name), zap.Error(err)) return err } wanted := int32(1) @@ -943,89 +1355,131 @@ func (b *Backend) waitForStatefulSet(ctx context.Context, name string) error { wanted = *statefulSet.Spec.Replicas } if statefulSet.Status.ReadyReplicas >= wanted { 
+ logger.Log().Debug("Kubernetes StatefulSet ready", zap.String("namespace", namespace), zap.String("statefulset", name), zap.Int32("ready", statefulSet.Status.ReadyReplicas), zap.Int32("wanted", wanted)) return nil } if time.Now().After(deadline) { + logger.Log().Error("Timed out waiting for Kubernetes StatefulSet", zap.String("namespace", namespace), zap.String("statefulset", name), zap.Int32("ready", statefulSet.Status.ReadyReplicas), zap.Int32("wanted", wanted)) return fmt.Errorf("timed out waiting for StatefulSet %s to become ready", name) } - if err := sleepUntilNextPoll(ctx, deadline, backoff.Next()); err != nil { + sleep := backoff.Next() + logger.Log().Debug("Kubernetes StatefulSet not ready yet", zap.String("namespace", namespace), zap.String("statefulset", name), zap.Int32("ready", statefulSet.Status.ReadyReplicas), zap.Int32("wanted", wanted), zap.Duration("sleep", sleep), zap.Time("deadline", deadline)) + if err := sleepUntilNextPoll(ctx, deadline, sleep); err != nil { + logger.Log().Warn("Stopped waiting for Kubernetes StatefulSet", zap.String("namespace", namespace), zap.String("statefulset", name), zap.Error(err)) return err } } } -func (b *Backend) waitForJob(ctx context.Context, jobName string) (*int, error) { +func (b *Backend) waitForJob(ctx context.Context, namespace string, jobName string) (*int, error) { startedAt := time.Now() deadline := time.Now().Add(24 * time.Hour) for { - job, err := b.client.BatchV1().Jobs(b.config.Namespace).Get(ctx, jobName, metav1.GetOptions{}) + job, err := b.client.BatchV1().Jobs(namespace).Get(ctx, jobName, metav1.GetOptions{}) if err != nil { + logger.Log().Error("Failed to get Kubernetes job while waiting", zap.String("namespace", namespace), zap.String("job", jobName), zap.Error(err)) return nil, err } if job.Status.Succeeded > 0 { exitCode := 0 + logger.Log().Debug("Kubernetes job succeeded", zap.String("namespace", namespace), zap.String("job", jobName), zap.Int32("succeeded", job.Status.Succeeded), 
zap.Int32("failed", job.Status.Failed), zap.Int32("active", job.Status.Active)) return &exitCode, nil } - if job.Status.Failed > 0 { - exitCode := b.lastExitCode(ctx, jobName) + if kubernetesJobFailed(job) { + exitCode := b.lastExitCode(ctx, namespace, jobName) + logger.Log().Error("Kubernetes job failed", zap.String("namespace", namespace), zap.String("job", jobName), zap.Int("exit_code", exitCode), zap.Int32("succeeded", job.Status.Succeeded), zap.Int32("failed", job.Status.Failed), zap.Int32("active", job.Status.Active)) return &exitCode, fmt.Errorf("job %s failed", jobName) } if time.Now().After(deadline) { + logger.Log().Error("Timed out waiting for Kubernetes job", zap.String("namespace", namespace), zap.String("job", jobName), zap.Int32("succeeded", job.Status.Succeeded), zap.Int32("failed", job.Status.Failed), zap.Int32("active", job.Status.Active)) return nil, fmt.Errorf("timed out waiting for job %s", jobName) } - if err := sleepUntilNextPoll(ctx, deadline, jobPollInterval(time.Since(startedAt))); err != nil { + sleep := jobPollInterval(time.Since(startedAt)) + logger.Log().Debug("Kubernetes job still running", zap.String("namespace", namespace), zap.String("job", jobName), zap.Int32("succeeded", job.Status.Succeeded), zap.Int32("failed", job.Status.Failed), zap.Int32("active", job.Status.Active), zap.Duration("sleep", sleep), zap.Time("deadline", deadline)) + if err := sleepUntilNextPoll(ctx, deadline, sleep); err != nil { + logger.Log().Warn("Stopped waiting for Kubernetes job", zap.String("namespace", namespace), zap.String("job", jobName), zap.Error(err)) return nil, err } } } -func (b *Backend) lastExitCode(ctx context.Context, jobName string) int { +func kubernetesJobFailed(job *batchv1.Job) bool { + for _, condition := range job.Status.Conditions { + if condition.Type == batchv1.JobFailed && condition.Status == corev1.ConditionTrue { + return true + } + } + return false +} + +func (b *Backend) lastExitCode(ctx context.Context, namespace string, 
jobName string) int { selector := labels.SelectorFromSet(labels.Set{"job-name": jobName}).String() - pods, err := b.client.CoreV1().Pods(b.config.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) + pods, err := b.client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) if err != nil || len(pods.Items) == 0 { + podCount := 0 + if pods != nil { + podCount = len(pods.Items) + } + logger.Log().Warn("Could not read Kubernetes job exit code; defaulting to 1", zap.String("namespace", namespace), zap.String("job", jobName), zap.String("selector", selector), zap.Int("pods", podCount), zap.Error(err)) return 1 } for _, status := range pods.Items[0].Status.ContainerStatuses { if status.State.Terminated != nil { + logger.Log().Debug("Read Kubernetes job container exit code", zap.String("namespace", namespace), zap.String("job", jobName), zap.String("pod", pods.Items[0].Name), zap.String("container", status.Name), zap.Int32("exit_code", status.State.Terminated.ExitCode)) return int(status.State.Terminated.ExitCode) } } + logger.Log().Warn("Kubernetes job pod had no terminated container status; defaulting to 1", zap.String("namespace", namespace), zap.String("job", jobName), zap.String("pod", pods.Items[0].Name)) return 1 } -func (b *Backend) podLogs(ctx context.Context, podName string) ([]byte, error) { - req := b.client.CoreV1().Pods(b.config.Namespace).GetLogs(podName, &corev1.PodLogOptions{}) +func (b *Backend) podLogs(ctx context.Context, namespace string, podName string) ([]byte, error) { + logger.Log().Debug("Reading Kubernetes pod logs", zap.String("namespace", namespace), zap.String("pod", podName)) + req := b.client.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{}) stream, err := req.Stream(ctx) if err != nil { + logger.Log().Warn("Failed to open Kubernetes pod log stream", zap.String("namespace", namespace), zap.String("pod", podName), zap.Error(err)) return nil, err } defer stream.Close() - return 
io.ReadAll(stream) + logs, err := io.ReadAll(stream) + if err != nil { + logger.Log().Warn("Failed to read Kubernetes pod logs", zap.String("namespace", namespace), zap.String("pod", podName), zap.Error(err)) + return logs, err + } + logger.Log().Debug("Read Kubernetes pod logs", zap.String("namespace", namespace), zap.String("pod", podName), zap.Int("bytes", len(logs))) + return logs, nil } -func (b *Backend) streamPodLogs(ctx context.Context, podName string, output chan<- string) { +func (b *Backend) streamPodLogs(ctx context.Context, namespace string, podName string, output chan<- string) { defer close(output) var stream io.ReadCloser deadline := time.Now().Add(30 * time.Second) + logger.Log().Debug("Opening Kubernetes follow log stream", zap.String("namespace", namespace), zap.String("pod", podName)) for { - req := b.client.CoreV1().Pods(b.config.Namespace).GetLogs(podName, &corev1.PodLogOptions{Follow: true}) + req := b.client.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{Follow: true}) var err error stream, err = req.Stream(ctx) if err == nil { + logger.Log().Debug("Kubernetes follow log stream opened", zap.String("namespace", namespace), zap.String("pod", podName)) break } if !strings.Contains(err.Error(), "ContainerCreating") && !strings.Contains(err.Error(), "PodInitializing") && !strings.Contains(err.Error(), "not available") { + logger.Log().Warn("Failed to stream Kubernetes pod logs", zap.String("namespace", namespace), zap.String("pod", podName), zap.Error(err)) output <- fmt.Sprintf("failed to stream pod logs: %v", err) return } if time.Now().After(deadline) { + logger.Log().Warn("Timed out opening Kubernetes pod log stream", zap.String("namespace", namespace), zap.String("pod", podName), zap.Error(err)) output <- fmt.Sprintf("failed to stream pod logs: %v", err) return } + logger.Log().Debug("Kubernetes pod logs not ready yet", zap.String("namespace", namespace), zap.String("pod", podName), zap.Error(err)) select { case 
<-ctx.Done(): + logger.Log().Warn("Context cancelled while opening Kubernetes pod logs", zap.String("namespace", namespace), zap.String("pod", podName), zap.Error(ctx.Err())) output <- fmt.Sprintf("failed to stream pod logs: %v", ctx.Err()) return case <-time.After(500 * time.Millisecond): @@ -1036,28 +1490,106 @@ func (b *Backend) streamPodLogs(ctx context.Context, podName string, output chan for scanner.Scan() { output <- scanner.Text() } + if err := scanner.Err(); err != nil { + logger.Log().Warn("Kubernetes pod log stream ended with scanner error", zap.String("namespace", namespace), zap.String("pod", podName), zap.Error(err)) + return + } + logger.Log().Debug("Kubernetes pod log stream ended", zap.String("namespace", namespace), zap.String("pod", podName)) } -func (b *Backend) ensureExpectedServices(ctx context.Context, root string, procedureName string, procedure *domain.Procedure, globalPorts []domain.Port) error { +func (b *Backend) ensureExpectedServices(ctx context.Context, root string, commandName string, procedureName string, procedure *domain.Procedure, globalPorts []domain.Port, portUse map[string]int) error { + namespace, _, err := parseRef(root) + if err != nil { + logger.Log().Error("Cannot reconcile Kubernetes Services for invalid root", zap.String("root", root), zap.String("command", commandName), zap.String("procedure", procedureName), zap.Error(err)) + return err + } ports := portsByName(globalPorts) for _, expected := range procedure.ExpectedPorts { port, ok := ports[expected.Name] if !ok { - return fmt.Errorf("expected port %s is not defined in top-level ports", expected.Name) + err := fmt.Errorf("expected port %s is not defined in top-level ports", expected.Name) + logger.Log().Error("Kubernetes expected port has no top-level port definition", zap.String("namespace", namespace), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("port", expected.Name), zap.Error(err)) + return err } - service, err := 
serviceSpec(b.config.Namespace, root, procedureName, expected.Name, port) + serviceProcedure := serviceProcedureName(commandName, procedureName, expected.Name, portUse) + service, err := serviceSpec(namespace, root, serviceProcedure, serviceSelector(refPVCName(root), commandName, procedureName, expected.Name, portUse), expected.Name, port) if err != nil { + logger.Log().Error("Failed to build Kubernetes Service for expected port", zap.String("namespace", namespace), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("port", expected.Name), zap.Error(err)) return err } - if _, err := b.client.CoreV1().Services(b.config.Namespace).Create(ctx, service, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { + logger.Log().Debug("Reconciling Kubernetes expected-port Service", + zap.String("namespace", namespace), + zap.String("command", commandName), + zap.String("procedure", procedureName), + zap.String("service_procedure", serviceProcedure), + zap.String("service", service.Name), + zap.String("port_name", expected.Name), + zap.Int("port", port.Port), + zap.String("protocol", port.Protocol), + zap.Any("selector", service.Spec.Selector), + ) + current, err := b.client.CoreV1().Services(namespace).Get(ctx, service.Name, metav1.GetOptions{}) + switch { + case apierrors.IsNotFound(err): + logger.Log().Info("Creating Kubernetes expected-port Service", zap.String("namespace", namespace), zap.String("service", service.Name), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("port_name", expected.Name)) + if _, err := b.client.CoreV1().Services(namespace).Create(ctx, service, metav1.CreateOptions{}); err != nil { + logger.Log().Error("Failed to create Kubernetes expected-port Service", zap.String("namespace", namespace), zap.String("service", service.Name), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("port_name", expected.Name), zap.Error(err)) 
+ return err + } + case err != nil: + logger.Log().Error("Failed to get Kubernetes expected-port Service", zap.String("namespace", namespace), zap.String("service", service.Name), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("port_name", expected.Name), zap.Error(err)) return err + default: + logger.Log().Info("Updating Kubernetes expected-port Service", zap.String("namespace", namespace), zap.String("service", service.Name), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("port_name", expected.Name), zap.String("resource_version", current.ResourceVersion)) + service.ResourceVersion = current.ResourceVersion + service.Spec.ClusterIP = current.Spec.ClusterIP + service.Spec.ClusterIPs = current.Spec.ClusterIPs + service.Spec.IPFamilies = current.Spec.IPFamilies + service.Spec.IPFamilyPolicy = current.Spec.IPFamilyPolicy + if _, err := b.client.CoreV1().Services(namespace).Update(ctx, service, metav1.UpdateOptions{}); err != nil { + logger.Log().Error("Failed to update Kubernetes expected-port Service", zap.String("namespace", namespace), zap.String("service", service.Name), zap.String("command", commandName), zap.String("procedure", procedureName), zap.String("port_name", expected.Name), zap.Error(err)) + return err + } } } return nil } -func (b *Backend) serviceReady(ctx context.Context, name string) (bool, int) { - service, err := b.client.CoreV1().Services(b.config.Namespace).Get(ctx, name, metav1.GetOptions{}) +func expectedPortUse(command *domain.CommandInstructionSet) map[string]int { + use := map[string]int{} + if command == nil { + return use + } + for _, procedure := range command.Procedures { + if procedure == nil { + continue + } + for _, expected := range procedure.ExpectedPorts { + use[expected.Name]++ + } + } + return use +} + +func serviceProcedureName(commandName string, procedureName string, portName string, portUse map[string]int) string { + if portUse[portName] > 1 { + 
return commandName + } + return procedureName +} + +func serviceSelector(pvc string, commandName string, procedureName string, portName string, portUse map[string]int) map[string]string { + selector := baseLabels(pvc) + if portUse[portName] > 1 { + selector[labelCommand] = dnsLabel(commandName) + return selector + } + selector[labelProcedure] = dnsLabel(procedureName) + return selector +} + +func (b *Backend) serviceReady(ctx context.Context, namespace string, name string) (bool, int) { + service, err := b.client.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, 0 } @@ -1066,7 +1598,7 @@ func (b *Backend) serviceReady(ctx context.Context, name string) (bool, int) { hostPort = int(service.Spec.Ports[0].Port) } selector := labels.SelectorFromSet(labels.Set{"kubernetes.io/service-name": name}).String() - slices, err := b.client.DiscoveryV1().EndpointSlices(b.config.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) + slices, err := b.client.DiscoveryV1().EndpointSlices(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) if err != nil { return false, hostPort } diff --git a/internal/runtime/kubernetes/names.go b/internal/runtime/kubernetes/names.go index 083cde66..603165a9 100644 --- a/internal/runtime/kubernetes/names.go +++ b/internal/runtime/kubernetes/names.go @@ -34,6 +34,15 @@ func dnsLabel(value string) string { return strings.Trim(value[:40], "-") + "-" + hash } +func objectName(value string) string { + name := dnsLabel(value) + // Services are stricter than most workload names: they must start with a letter. 
+ if name[0] < 'a' || name[0] > 'z' { + name = "d-" + name + } + return name +} + func shortHash(value string) string { sum := sha1.Sum([]byte(value)) return hex.EncodeToString(sum[:])[:10] @@ -47,12 +56,20 @@ func stagingPVCName(artifact string) string { return dnsLabel("druid-stage-" + shortHash(artifact)) } -func jobName(prefix string, root string, procedureName string) string { - return dnsLabel(fmt.Sprintf("druid-%s-%s-%s", prefix, refPVCName(root), procedureName)) +func runtimeID(root string) string { + pvc := refPVCName(root) + if strings.HasPrefix(pvc, "druid-") && strings.HasSuffix(pvc, "-data") { + return strings.TrimSuffix(strings.TrimPrefix(pvc, "druid-"), "-data") + } + return pvc +} + +func procedureResourceName(root string, commandName string, procedureIndex int) string { + return objectName(fmt.Sprintf("%s-%s-%d", runtimeID(root), commandName, procedureIndex)) } -func statefulSetName(root string, procedureName string) string { - return dnsLabel(fmt.Sprintf("druid-sts-%s-%s", refPVCName(root), procedureName)) +func jobName(prefix string, root string, procedureName string) string { + return objectName(fmt.Sprintf("%s-%s-%s", runtimeID(root), prefix, procedureName)) } func devStatefulSetName(root string) string { @@ -60,7 +77,7 @@ func devStatefulSetName(root string) string { } func serviceName(root string, procedureName string, portName string) string { - return dnsLabel(fmt.Sprintf("druid-%s-%s-%s", refPVCName(root), procedureName, portName)) + return objectName(fmt.Sprintf("%s-%s-%s", runtimeID(root), procedureName, portName)) } func ref(namespace string, pvc string) string { diff --git a/internal/runtime/kubernetes/names_test.go b/internal/runtime/kubernetes/names_test.go new file mode 100644 index 00000000..e8641586 --- /dev/null +++ b/internal/runtime/kubernetes/names_test.go @@ -0,0 +1,22 @@ +package kubernetes + +import "testing" + +func TestProcedureResourceNamePrefixesNumericRuntimeID(t *testing.T) { + root := ref("games", 
dataPVCName("0636a354-b3f4-4471-8749-3890e675a01c")) + + if got, want := procedureResourceName(root, "start", 1), "d-0636a354-b3f4-4471-8749-3890e675a01c-start-1"; got != want { + t.Fatalf("procedureResourceName = %s, want %s", got, want) + } + if got, want := serviceName(root, "start", "main"), "d-0636a354-b3f4-4471-8749-3890e675a01c-start-main"; got != want { + t.Fatalf("serviceName = %s, want %s", got, want) + } +} + +func TestProcedureResourceNameKeepsReadableAlphaRuntimeID(t *testing.T) { + root := ref("games", dataPVCName("deployment-123")) + + if got, want := procedureResourceName(root, "start", 0), "deployment-123-start-0"; got != want { + t.Fatalf("procedureResourceName = %s, want %s", got, want) + } +} diff --git a/internal/runtime/kubernetes/resources.go b/internal/runtime/kubernetes/resources.go index 8341e6dd..19cf71d5 100644 --- a/internal/runtime/kubernetes/resources.go +++ b/internal/runtime/kubernetes/resources.go @@ -200,14 +200,14 @@ func helperJobSpec(namespace string, jobName string, pvc string, image string, c } } -func procedureJobSpec(namespace string, root string, procedureName string, procedure *domain.Procedure, env map[string]string, registrySecret string) (*batchv1.Job, error) { +func procedureJobSpec(namespace string, root string, commandName string, procedureName string, resourceName string, procedure *domain.Procedure, env map[string]string, registrySecret string) (*batchv1.Job, error) { _, pvc, err := parseRef(root) if err != nil { return nil, err } labels := baseLabels(pvc) labels[labelProcedure] = dnsLabel(procedureName) - labels[labelCommand] = dnsLabel(procedureName) + labels[labelCommand] = dnsLabel(commandName) if len(procedure.ExpectedPorts) == 1 { labels[labelPortName] = dnsLabel(procedure.ExpectedPorts[0].Name) } @@ -233,7 +233,7 @@ func procedureJobSpec(namespace string, root string, procedureName string, proce } return &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ - Name: jobName("proc", root, procedureName), + Name: 
resourceName, Namespace: namespace, Labels: labels, }, @@ -247,14 +247,14 @@ func procedureJobSpec(namespace string, root string, procedureName string, proce }, nil } -func procedureStatefulSetSpec(namespace string, root string, procedureName string, procedure *domain.Procedure, env map[string]string, registrySecret string) (*appsv1.StatefulSet, error) { +func procedureStatefulSetSpec(namespace string, root string, commandName string, procedureName string, resourceName string, procedure *domain.Procedure, env map[string]string, registrySecret string) (*appsv1.StatefulSet, error) { _, pvc, err := parseRef(root) if err != nil { return nil, err } labels := baseLabels(pvc) labels[labelProcedure] = dnsLabel(procedureName) - labels[labelCommand] = dnsLabel(procedureName) + labels[labelCommand] = dnsLabel(commandName) if len(procedure.ExpectedPorts) == 1 { labels[labelPortName] = dnsLabel(procedure.ExpectedPorts[0].Name) } @@ -279,13 +279,13 @@ func procedureStatefulSetSpec(namespace string, root string, procedureName strin } return &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: statefulSetName(root, procedureName), + Name: resourceName, Namespace: namespace, Labels: labels, }, Spec: appsv1.StatefulSetSpec{ Replicas: &replicas, - ServiceName: statefulSetName(root, procedureName), + ServiceName: resourceName, Selector: &metav1.LabelSelector{MatchLabels: labels}, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{Labels: labels}, @@ -365,23 +365,21 @@ func devServiceSpec(namespace string, root string, pvc string) *corev1.Service { } } -func serviceSpec(namespace string, root string, procedureName string, portName string, port domain.Port) (*corev1.Service, error) { +func serviceSpec(namespace string, root string, serviceProcedure string, selector map[string]string, portName string, port domain.Port) (*corev1.Service, error) { _, pvc, err := parseRef(root) if err != nil { return nil, err } labels := baseLabels(pvc) - labels[labelProcedure] = 
dnsLabel(procedureName) + labels[labelProcedure] = dnsLabel(serviceProcedure) labels[labelPortName] = dnsLabel(portName) - selector := baseLabels(pvc) - selector[labelProcedure] = dnsLabel(procedureName) protocol := corev1.ProtocolTCP if normalizeProtocol(port.Protocol) == "udp" { protocol = corev1.ProtocolUDP } return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: serviceName(root, procedureName, portName), + Name: serviceName(root, serviceProcedure, portName), Namespace: namespace, Labels: labels, }, diff --git a/internal/runtime/kubernetes/resources_test.go b/internal/runtime/kubernetes/resources_test.go index 61dba1c0..731989cf 100644 --- a/internal/runtime/kubernetes/resources_test.go +++ b/internal/runtime/kubernetes/resources_test.go @@ -29,6 +29,16 @@ func (f fakeHubble) HasFlow(context.Context, TrafficQuery) (bool, error) { return f.hasFlow, f.err } +func TestRootRefUsesRequestedNamespace(t *testing.T) { + backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), fake.NewSimpleClientset(), fakeHubble{}) + if got, want := backend.RootRef("deployment-123", "games"), ref("games", dataPVCName("deployment-123")); got != want { + t.Fatalf("RootRef = %s, want %s", got, want) + } + if got, want := backend.RootRef("deployment-123", ""), ref("druid", dataPVCName("deployment-123")); got != want { + t.Fatalf("RootRef default = %s, want %s", got, want) + } +} + func TestProcedureJobSpecBuildsDeterministicMountsAndLabels(t *testing.T) { procedure := &domain.Procedure{ Image: "alpine:3.20", @@ -41,7 +51,7 @@ func TestProcedureJobSpecBuildsDeterministicMountsAndLabels(t *testing.T) { Mounts: []domain.Mount{{Path: "/work", SubPath: "cache"}}, } - job, err := procedureJobSpec("druid", ref("druid", "druid-static-web-data"), "start", procedure, procedure.Env, "registry-secret") + job, err := procedureJobSpec("druid", ref("druid", "druid-static-web-data"), "start", "start", "static-web-start-0", procedure, 
procedure.Env, "registry-secret") if err != nil { t.Fatal(err) } @@ -75,7 +85,7 @@ func TestProcedureJobSpecUsesProvidedRuntimeEnv(t *testing.T) { "PROCEDURE_ONLY": "ignored", }, } - job, err := procedureJobSpec("druid", ref("druid", "druid-static-web-data"), "start", procedure, map[string]string{ + job, err := procedureJobSpec("druid", ref("druid", "druid-static-web-data"), "start", "start", "static-web-start-0", procedure, map[string]string{ "DRUID_PORT_HTTP": "8080", }, "registry-secret") if err != nil { @@ -94,7 +104,7 @@ func TestProcedureStatefulSetSpecUsesProvidedRuntimeEnv(t *testing.T) { "PROCEDURE_ONLY": "ignored", }, } - statefulSet, err := procedureStatefulSetSpec("druid", ref("druid", "druid-static-web-data"), "start", procedure, map[string]string{ + statefulSet, err := procedureStatefulSetSpec("druid", ref("druid", "druid-static-web-data"), "start", "start", "static-web-start-0", procedure, map[string]string{ "DRUID_PORT_HTTP": "8080", }, "registry-secret") if err != nil { @@ -114,7 +124,7 @@ func TestProcedureStatefulSetSpecBuildsPersistentWorkload(t *testing.T) { Mounts: []domain.Mount{{Path: "/usr/share/nginx/html", SubPath: "site", ReadOnly: true}}, } - statefulSet, err := procedureStatefulSetSpec("druid", ref("druid", "druid-static-web-data"), "start", procedure, procedure.Env, "registry-secret") + statefulSet, err := procedureStatefulSetSpec("druid", ref("druid", "druid-static-web-data"), "start", "start", "static-web-start-0", procedure, procedure.Env, "registry-secret") if err != nil { t.Fatal(err) } @@ -122,7 +132,7 @@ func TestProcedureStatefulSetSpecBuildsPersistentWorkload(t *testing.T) { if statefulSet.Namespace != "druid" { t.Fatalf("namespace = %s, want druid", statefulSet.Namespace) } - if statefulSet.Name != statefulSetName(ref("druid", "druid-static-web-data"), "start") { + if statefulSet.Name != "static-web-start-0" { t.Fatalf("name = %s", statefulSet.Name) } if statefulSet.Spec.Replicas == nil || *statefulSet.Spec.Replicas != 1 { 
@@ -215,7 +225,7 @@ func TestSpawnPullWorkerCreateUsesFinalPVCAndWorkerJob(t *testing.T) { Mode: ports.RuntimeWorkerModeCreate, RuntimeID: "deployment-123", Artifact: "registry.local/lab:1.0", - RootRef: ref("druid", dataPVCName("deployment-123")), + RootRef: ref("games", dataPVCName("deployment-123")), MountPath: "/scroll", CallbackURL: "http://druid-cli:8083/internal/v1/workers/deployment-123/complete", CallbackToken: "secret-token", @@ -223,7 +233,7 @@ func TestSpawnPullWorkerCreateUsesFinalPVCAndWorkerJob(t *testing.T) { if err := backend.SpawnPullWorker(context.Background(), action); err != nil { t.Fatal(err) } - pvcs, err := client.CoreV1().PersistentVolumeClaims("druid").List(context.Background(), metav1.ListOptions{}) + pvcs, err := client.CoreV1().PersistentVolumeClaims("games").List(context.Background(), metav1.ListOptions{}) if err != nil { t.Fatal(err) } @@ -233,12 +243,85 @@ func TestSpawnPullWorkerCreateUsesFinalPVCAndWorkerJob(t *testing.T) { if len(jobs) != 1 { t.Fatalf("jobs = %d, want 1", len(jobs)) } + if jobs[0].Namespace != "games" { + t.Fatalf("job namespace = %s, want games", jobs[0].Namespace) + } command := strings.Join(jobs[0].Spec.Template.Spec.Containers[0].Command, " ") if !strings.Contains(command, "worker pull") || strings.Contains(command, "cat /scroll/scroll.yaml") || strings.Contains(command, "--action-id") { t.Fatalf("command = %#v", jobs[0].Spec.Template.Spec.Containers[0].Command) } } +func TestDeleteFinishedJobRemovesJob(t *testing.T) { + client := fake.NewSimpleClientset(&batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "finished", Namespace: "druid"}, + }) + backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), client, fakeHubble{}) + + backend.deleteFinishedJob(context.Background(), "druid", "finished") + + if _, err := client.BatchV1().Jobs("druid").Get(context.Background(), "finished", metav1.GetOptions{}); !apierrors.IsNotFound(err) { + t.Fatalf("Job get error 
= %v, want not found", err) + } +} + +func TestCreateFreshJobKeepsFailedJob(t *testing.T) { + client := fake.NewSimpleClientset(&batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "failed", Namespace: "druid"}, + Status: batchv1.JobStatus{Conditions: []batchv1.JobCondition{{ + Type: batchv1.JobFailed, + Status: corev1.ConditionTrue, + }}}, + }) + backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), client, fakeHubble{}) + + created, err := backend.createFreshJob(context.Background(), &batchv1.Job{ObjectMeta: metav1.ObjectMeta{Name: "failed", Namespace: "druid"}}) + if err != nil { + t.Fatal(err) + } + if created.Name == "failed" { + t.Fatal("retry job reused failed job name") + } + if _, err := client.BatchV1().Jobs("druid").Get(context.Background(), "failed", metav1.GetOptions{}); err != nil { + t.Fatalf("failed Job was not retained: %v", err) + } + if _, err := client.BatchV1().Jobs("druid").Get(context.Background(), created.Name, metav1.GetOptions{}); err != nil { + t.Fatalf("retry Job was not created: %v", err) + } +} + +func TestKubernetesJobFailedRequiresTerminalCondition(t *testing.T) { + retrying := &batchv1.Job{Status: batchv1.JobStatus{Failed: 1, Active: 1}} + if kubernetesJobFailed(retrying) { + t.Fatal("job with failed pod but no terminal Failed condition should still be retryable") + } + failed := &batchv1.Job{Status: batchv1.JobStatus{Conditions: []batchv1.JobCondition{{ + Type: batchv1.JobFailed, + Status: corev1.ConditionTrue, + }}}} + if !kubernetesJobFailed(failed) { + t.Fatal("job with terminal Failed condition should be failed") + } +} + +func TestExpectedServicesUseRootNamespace(t *testing.T) { + client := fake.NewSimpleClientset() + backend := NewWithClient(Config{Namespace: "druid-system"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), client, fakeHubble{}) + root := ref("games", dataPVCName("deployment-123")) + procedure := &domain.Procedure{ExpectedPorts: 
[]domain.ExpectedPort{{Name: "http"}}} + + err := backend.ensureExpectedServices(context.Background(), root, "start", "start", procedure, []domain.Port{{Name: "http", Port: 8080, Protocol: "tcp"}}, map[string]int{"http": 1}) + if err != nil { + t.Fatal(err) + } + if _, err := client.CoreV1().Services("games").Get(context.Background(), serviceName(root, "start", "http"), metav1.GetOptions{}); err != nil { + t.Fatalf("service in runtime namespace: %v", err) + } + if _, err := client.CoreV1().Services("druid-system").Get(context.Background(), serviceName(root, "start", "http"), metav1.GetOptions{}); !apierrors.IsNotFound(err) { + t.Fatalf("service in backend namespace error = %v, want not found", err) + } +} + func TestRegistryConfigSecretUsesDruidClientConfigShape(t *testing.T) { client := fake.NewSimpleClientset() backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), client, fakeHubble{}) @@ -271,7 +354,7 @@ func TestExpectedPortsUsesHubbleFlowPresence(t *testing.T) { backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), client, fakeHubble{hasFlow: true}) root := ref("druid", "druid-static-web-data") procedureName := "start" - service, err := serviceSpec("druid", root, procedureName, "http", domain.Port{Name: "http", Port: 80, Protocol: "tcp"}) + service, err := serviceSpec("druid", root, procedureName, serviceSelector(refPVCName(root), procedureName, procedureName, "http", map[string]int{"http": 1}), "http", domain.Port{Name: "http", Port: 80, Protocol: "tcp"}) if err != nil { t.Fatal(err) } @@ -319,7 +402,7 @@ func TestExpectedPortsDegradesWhenHubbleUnavailable(t *testing.T) { client := fake.NewSimpleClientset() backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), client, fakeHubble{err: errors.New("relay unavailable")}) root := ref("druid", "druid-static-web-data") - service, err := 
serviceSpec("druid", root, "start", "http", domain.Port{Name: "http", Port: 80, Protocol: "tcp"}) + service, err := serviceSpec("druid", root, "start", serviceSelector(refPVCName(root), "start", "start", "http", map[string]int{"http": 1}), "http", domain.Port{Name: "http", Port: 80, Protocol: "tcp"}) if err != nil { t.Fatal(err) } @@ -379,7 +462,7 @@ func TestRoutingTargetsReturnStableBackendServices(t *testing.T) { if target.Protocol != "http" || target.PortName != "http" || target.Procedure != "web" { t.Fatalf("target = %#v", target) } - if target.Selector[labelScrollID] != "druid-static-web-data" || target.Selector[labelPortName] != "http" { + if target.Selector[labelScrollID] != "druid-static-web-data" || target.Selector[labelProcedure] != "web" { t.Fatalf("selector = %#v", target.Selector) } if webdav.ServiceName != serviceName(root, "dev", "webdav") || webdav.Port != 8084 || webdav.Protocol != "https" { @@ -415,7 +498,7 @@ func TestRoutingTargetsCollapseColdstarterAndRuntimePort(t *testing.T) { if mainTargets[0].Name != "main" || mainTargets[0].Procedure != "coldstart" { t.Fatalf("main target = %#v", mainTargets[0]) } - if mainTargets[0].Selector[labelPortName] != "main" { + if mainTargets[0].Selector[labelCommand] != "start" { t.Fatalf("selector = %#v", mainTargets[0].Selector) } } @@ -426,9 +509,9 @@ func TestStopRuntimeDeletesWorkloadsButPreservesDataAndServices(t *testing.T) { root := ref("druid", "druid-static-web-data") labels := baseLabels("druid-static-web-data") labels[labelProcedure] = "web" - jobName := jobName("proc", root, "web") - statefulSetName := statefulSetName(root, "web") - service, err := serviceSpec("druid", root, "web", "http", domain.Port{Name: "http", Port: 8080, Protocol: "tcp"}) + jobName := "static-web-web-0" + statefulSetName := "static-web-web-0" + service, err := serviceSpec("druid", root, "web", serviceSelector(refPVCName(root), "web", "web", "http", map[string]int{"http": 1}), "http", domain.Port{Name: "http", Port: 8080, 
Protocol: "tcp"}) if err != nil { t.Fatal(err) } @@ -483,7 +566,7 @@ func TestDeleteRuntimePurgesServicesAndDataWhenRequested(t *testing.T) { client := fake.NewSimpleClientset() backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), client, fakeHubble{}) root := ref("druid", "druid-static-web-data") - service, err := serviceSpec("druid", root, "web", "http", domain.Port{Name: "http", Port: 8080, Protocol: "tcp"}) + service, err := serviceSpec("druid", root, "web", serviceSelector(refPVCName(root), "web", "web", "http", map[string]int{"http": 1}), "http", domain.Port{Name: "http", Port: 8080, Protocol: "tcp"}) if err != nil { t.Fatal(err) } @@ -550,7 +633,7 @@ func TestSignalDeletesPersistentStatefulSetAndPods(t *testing.T) { client := fake.NewSimpleClientset() backend := NewWithClient(Config{Namespace: "druid"}, coreservices.NewConsoleManager(coreservices.NewLogManager()), client, fakeHubble{}) root := ref("druid", "druid-static-web-data") - name := statefulSetName(root, "start") + name := "static-web-start-0" labels := baseLabels("druid-static-web-data") labels[labelProcedure] = "start" if _, err := client.AppsV1().StatefulSets("druid").Create(context.Background(), &appsv1.StatefulSet{ diff --git a/internal/runtime/kubernetes/state_store.go b/internal/runtime/kubernetes/state_store.go index 8d53e3f8..a1e60ea9 100644 --- a/internal/runtime/kubernetes/state_store.go +++ b/internal/runtime/kubernetes/state_store.go @@ -14,7 +14,6 @@ import ( k8sclient "k8s.io/client-go/kubernetes" "github.com/highcard-dev/daemon/internal/core/domain" - coreservices "github.com/highcard-dev/daemon/internal/core/services" ) const ( @@ -84,7 +83,7 @@ func (s *ConfigMapStateStore) CreateScroll(scroll *domain.RuntimeScroll) error { } _, err = s.client.CoreV1().ConfigMaps(s.namespace).Create(context.Background(), configMap, metav1.CreateOptions{}) if apierrors.IsAlreadyExists(err) { - return fmt.Errorf("%w: %s", 
coreservices.ErrScrollAlreadyExists, scroll.ID) + return fmt.Errorf("%w: %s", domain.ErrRuntimeScrollAlreadyExists, scroll.ID) } return err } @@ -115,7 +114,7 @@ func (s *ConfigMapStateStore) ListScrolls() ([]*domain.RuntimeScroll, error) { func (s *ConfigMapStateStore) GetScroll(id string) (*domain.RuntimeScroll, error) { configMap, err := s.client.CoreV1().ConfigMaps(s.namespace).Get(context.Background(), scrollConfigMapName(id), metav1.GetOptions{}) if apierrors.IsNotFound(err) { - return nil, coreservices.ErrScrollNotFound + return nil, domain.ErrRuntimeScrollNotFound } if err != nil { return nil, err @@ -126,7 +125,7 @@ func (s *ConfigMapStateStore) GetScroll(id string) (*domain.RuntimeScroll, error func (s *ConfigMapStateStore) UpdateScroll(scroll *domain.RuntimeScroll) error { current, err := s.client.CoreV1().ConfigMaps(s.namespace).Get(context.Background(), scrollConfigMapName(scroll.ID), metav1.GetOptions{}) if apierrors.IsNotFound(err) { - return coreservices.ErrScrollNotFound + return domain.ErrRuntimeScrollNotFound } if err != nil { return err @@ -139,7 +138,7 @@ func (s *ConfigMapStateStore) UpdateScroll(scroll *domain.RuntimeScroll) error { next.ResourceVersion = current.ResourceVersion _, err = s.client.CoreV1().ConfigMaps(s.namespace).Update(context.Background(), next, metav1.UpdateOptions{}) if apierrors.IsNotFound(err) { - return coreservices.ErrScrollNotFound + return domain.ErrRuntimeScrollNotFound } return err } @@ -147,7 +146,7 @@ func (s *ConfigMapStateStore) UpdateScroll(scroll *domain.RuntimeScroll) error { func (s *ConfigMapStateStore) DeleteScroll(id string) error { err := s.client.CoreV1().ConfigMaps(s.namespace).Delete(context.Background(), scrollConfigMapName(id), metav1.DeleteOptions{}) if apierrors.IsNotFound(err) { - return coreservices.ErrScrollNotFound + return domain.ErrRuntimeScrollNotFound } return err } diff --git a/internal/runtime/kubernetes/state_store_test.go b/internal/runtime/kubernetes/state_store_test.go index 
b7d5e7d2..558783b4 100644 --- a/internal/runtime/kubernetes/state_store_test.go +++ b/internal/runtime/kubernetes/state_store_test.go @@ -5,7 +5,6 @@ import ( "testing" "github.com/highcard-dev/daemon/internal/core/domain" - coreservices "github.com/highcard-dev/daemon/internal/core/services" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" ) @@ -68,8 +67,8 @@ func TestConfigMapStateStoreRoundTripsRuntimeScroll(t *testing.T) { if err := store.DeleteScroll("container-lab"); err != nil { t.Fatal(err) } - if _, err := store.GetScroll("container-lab"); !errors.Is(err, coreservices.ErrScrollNotFound) { - t.Fatalf("GetScroll after delete error = %v, want ErrScrollNotFound", err) + if _, err := store.GetScroll("container-lab"); !errors.Is(err, domain.ErrRuntimeScrollNotFound) { + t.Fatalf("GetScroll after delete error = %v, want domain.ErrRuntimeScrollNotFound", err) } } @@ -86,18 +85,18 @@ func TestConfigMapStateStoreDuplicateCreateReturnsConflict(t *testing.T) { if err := store.CreateScroll(scroll); err != nil { t.Fatal(err) } - if err := store.CreateScroll(scroll); !errors.Is(err, coreservices.ErrScrollAlreadyExists) { - t.Fatalf("CreateScroll duplicate error = %v, want ErrScrollAlreadyExists", err) + if err := store.CreateScroll(scroll); !errors.Is(err, domain.ErrRuntimeScrollAlreadyExists) { + t.Fatalf("CreateScroll duplicate error = %v, want domain.ErrRuntimeScrollAlreadyExists", err) } } func TestConfigMapStateStoreMissingScrollReturnsNotFound(t *testing.T) { store := NewConfigMapStateStoreWithClient("druid", fake.NewSimpleClientset()) - if _, err := store.GetScroll("missing"); !errors.Is(err, coreservices.ErrScrollNotFound) { - t.Fatalf("GetScroll error = %v, want ErrScrollNotFound", err) + if _, err := store.GetScroll("missing"); !errors.Is(err, domain.ErrRuntimeScrollNotFound) { + t.Fatalf("GetScroll error = %v, want domain.ErrRuntimeScrollNotFound", err) } - if err := store.DeleteScroll("missing"); !errors.Is(err, 
coreservices.ErrScrollNotFound) { - t.Fatalf("DeleteScroll error = %v, want ErrScrollNotFound", err) + if err := store.DeleteScroll("missing"); !errors.Is(err, domain.ErrRuntimeScrollNotFound) { + t.Fatalf("DeleteScroll error = %v, want domain.ErrRuntimeScrollNotFound", err) } } diff --git a/test/integration/docker/docker_cli_test.go b/test/integration/docker/docker_cli_test.go index 55dbc642..a5842f3d 100644 --- a/test/integration/docker/docker_cli_test.go +++ b/test/integration/docker/docker_cli_test.go @@ -231,8 +231,6 @@ ports: - name: http protocol: http port: %d - mandatory: true - sleep_handler: generic commands: start: run: restart @@ -245,12 +243,11 @@ commands: mounts: - path: /runtime sub_path: . + env: + DRUID_ROOT: /runtime + DRUID_PORT_HTTP_COLDSTARTER: generic command: - druid-coldstarter - - --root - - /runtime - - --status-file - - .coldstarter-finished.json - id: web image: busybox:1.36 expectedPorts: diff --git a/test/integration/internal/e2e/harness.go b/test/integration/internal/e2e/harness.go index dcbac7e8..71fb4626 100644 --- a/test/integration/internal/e2e/harness.go +++ b/test/integration/internal/e2e/harness.go @@ -203,7 +203,6 @@ ports: - name: http protocol: http port: %d - mandatory: true commands: serve: run: persistent diff --git a/test/mock/services.go b/test/mock/services.go index 231950e0..d27d75cd 100644 --- a/test/mock/services.go +++ b/test/mock/services.go @@ -62,7 +62,7 @@ func (mr *MockAuthorizerServiceInterfaceMockRecorder) CheckHeader(r any) *gomock } // CheckQuery mocks base method. 
-func (m *MockAuthorizerServiceInterface) CheckQuery(runtimeID string, token string) (*ports.AuthContext, error) { +func (m *MockAuthorizerServiceInterface) CheckQuery(runtimeID, token string) (*ports.AuthContext, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CheckQuery", runtimeID, token) ret0, _ := ret[0].(*ports.AuthContext) @@ -77,7 +77,7 @@ func (mr *MockAuthorizerServiceInterfaceMockRecorder) CheckQuery(runtimeID, toke } // GenerateQueryToken mocks base method. -func (m *MockAuthorizerServiceInterface) GenerateQueryToken(runtimeID string, ownerID string) string { +func (m *MockAuthorizerServiceInterface) GenerateQueryToken(runtimeID, ownerID string) string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GenerateQueryToken", runtimeID, ownerID) ret0, _ := ret[0].(string) @@ -382,20 +382,6 @@ func (mr *MockRuntimeBackendInterfaceMockRecorder) Name() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).Name)) } -// StartDev mocks base method. -func (m *MockRuntimeBackendInterface) StartDev(ctx context.Context, action ports.RuntimeDevAction) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StartDev", ctx, action) - ret0, _ := ret[0].(error) - return ret0 -} - -// StartDev indicates an expected call of StartDev. -func (mr *MockRuntimeBackendInterfaceMockRecorder) StartDev(ctx, action any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartDev", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).StartDev), ctx, action) -} - // ReadScrollFile mocks base method. 
func (m *MockRuntimeBackendInterface) ReadScrollFile(root string) ([]byte, error) { m.ctrl.T.Helper() @@ -425,6 +411,20 @@ func (mr *MockRuntimeBackendInterfaceMockRecorder) RestoreRuntime(ctx, root, art return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreRuntime", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).RestoreRuntime), ctx, root, artifact, registryCredentials) } +// RootRef mocks base method. +func (m *MockRuntimeBackendInterface) RootRef(id, namespace string) string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RootRef", id, namespace) + ret0, _ := ret[0].(string) + return ret0 +} + +// RootRef indicates an expected call of RootRef. +func (mr *MockRuntimeBackendInterfaceMockRecorder) RootRef(id, namespace any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RootRef", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).RootRef), id, namespace) +} + // RoutingTargets mocks base method. func (m *MockRuntimeBackendInterface) RoutingTargets(root string, commands map[string]*domain.CommandInstructionSet, globalPorts []domain.Port) ([]domain.RuntimeRoutingTarget, error) { m.ctrl.T.Helper() @@ -483,18 +483,18 @@ func (mr *MockRuntimeBackendInterfaceMockRecorder) SpawnPullWorker(ctx, action a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SpawnPullWorker", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).SpawnPullWorker), ctx, action) } -// StopRuntime mocks base method. -func (m *MockRuntimeBackendInterface) StopRuntime(root string) error { +// StartDev mocks base method. +func (m *MockRuntimeBackendInterface) StartDev(ctx context.Context, action ports.RuntimeDevAction) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StopRuntime", root) + ret := m.ctrl.Call(m, "StartDev", ctx, action) ret0, _ := ret[0].(error) return ret0 } -// StopRuntime indicates an expected call of StopRuntime. 
-func (mr *MockRuntimeBackendInterfaceMockRecorder) StopRuntime(root any) *gomock.Call { +// StartDev indicates an expected call of StartDev. +func (mr *MockRuntimeBackendInterfaceMockRecorder) StartDev(ctx, action any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopRuntime", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).StopRuntime), root) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartDev", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).StartDev), ctx, action) } // StopDev mocks base method. @@ -511,6 +511,144 @@ func (mr *MockRuntimeBackendInterfaceMockRecorder) StopDev(ctx, root any) *gomoc return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopDev", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).StopDev), ctx, root) } +// StopRuntime mocks base method. +func (m *MockRuntimeBackendInterface) StopRuntime(root string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StopRuntime", root) + ret0, _ := ret[0].(error) + return ret0 +} + +// StopRuntime indicates an expected call of StopRuntime. +func (mr *MockRuntimeBackendInterfaceMockRecorder) StopRuntime(root any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopRuntime", reflect.TypeOf((*MockRuntimeBackendInterface)(nil).StopRuntime), root) +} + +// MockRuntimeScrollStore is a mock of RuntimeScrollStore interface. +type MockRuntimeScrollStore struct { + ctrl *gomock.Controller + recorder *MockRuntimeScrollStoreMockRecorder + isgomock struct{} +} + +// MockRuntimeScrollStoreMockRecorder is the mock recorder for MockRuntimeScrollStore. +type MockRuntimeScrollStoreMockRecorder struct { + mock *MockRuntimeScrollStore +} + +// NewMockRuntimeScrollStore creates a new mock instance. 
+func NewMockRuntimeScrollStore(ctrl *gomock.Controller) *MockRuntimeScrollStore { + mock := &MockRuntimeScrollStore{ctrl: ctrl} + mock.recorder = &MockRuntimeScrollStoreMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockRuntimeScrollStore) EXPECT() *MockRuntimeScrollStoreMockRecorder { + return m.recorder +} + +// CreateScroll mocks base method. +func (m *MockRuntimeScrollStore) CreateScroll(scroll *domain.RuntimeScroll) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateScroll", scroll) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateScroll indicates an expected call of CreateScroll. +func (mr *MockRuntimeScrollStoreMockRecorder) CreateScroll(scroll any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateScroll", reflect.TypeOf((*MockRuntimeScrollStore)(nil).CreateScroll), scroll) +} + +// DeleteScroll mocks base method. +func (m *MockRuntimeScrollStore) DeleteScroll(id string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteScroll", id) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteScroll indicates an expected call of DeleteScroll. +func (mr *MockRuntimeScrollStoreMockRecorder) DeleteScroll(id any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteScroll", reflect.TypeOf((*MockRuntimeScrollStore)(nil).DeleteScroll), id) +} + +// GetScroll mocks base method. +func (m *MockRuntimeScrollStore) GetScroll(id string) (*domain.RuntimeScroll, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetScroll", id) + ret0, _ := ret[0].(*domain.RuntimeScroll) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetScroll indicates an expected call of GetScroll. 
+func (mr *MockRuntimeScrollStoreMockRecorder) GetScroll(id any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetScroll", reflect.TypeOf((*MockRuntimeScrollStore)(nil).GetScroll), id) +} + +// ListScrolls mocks base method. +func (m *MockRuntimeScrollStore) ListScrolls() ([]*domain.RuntimeScroll, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListScrolls") + ret0, _ := ret[0].([]*domain.RuntimeScroll) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListScrolls indicates an expected call of ListScrolls. +func (mr *MockRuntimeScrollStoreMockRecorder) ListScrolls() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListScrolls", reflect.TypeOf((*MockRuntimeScrollStore)(nil).ListScrolls)) +} + +// Root mocks base method. +func (m *MockRuntimeScrollStore) Root(id string) string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Root", id) + ret0, _ := ret[0].(string) + return ret0 +} + +// Root indicates an expected call of Root. +func (mr *MockRuntimeScrollStoreMockRecorder) Root(id any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Root", reflect.TypeOf((*MockRuntimeScrollStore)(nil).Root), id) +} + +// StateDir mocks base method. +func (m *MockRuntimeScrollStore) StateDir() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateDir") + ret0, _ := ret[0].(string) + return ret0 +} + +// StateDir indicates an expected call of StateDir. +func (mr *MockRuntimeScrollStoreMockRecorder) StateDir() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateDir", reflect.TypeOf((*MockRuntimeScrollStore)(nil).StateDir)) +} + +// UpdateScroll mocks base method. 
+func (m *MockRuntimeScrollStore) UpdateScroll(scroll *domain.RuntimeScroll) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateScroll", scroll) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateScroll indicates an expected call of UpdateScroll. +func (mr *MockRuntimeScrollStoreMockRecorder) UpdateScroll(scroll any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateScroll", reflect.TypeOf((*MockRuntimeScrollStore)(nil).UpdateScroll), scroll) +} + // MockBroadcastChannelInterface is a mock of BroadcastChannelInterface interface. type MockBroadcastChannelInterface struct { ctrl *gomock.Controller From 5147438b69167c2338bbd45cf0830c72e0f26c5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc=20Schottst=C3=A4dt?= Date: Sat, 16 May 2026 01:03:32 +0200 Subject: [PATCH 6/6] feat: enhance runtime scroll creation with UUID generation and timeout settings - Added UUID generation for runtime scroll IDs when omitted during creation, ensuring unique identifiers. - Implemented a timeout for HTTP clients in the OpenAPI client to improve reliability. - Updated OpenAPI specifications and related documentation to reflect changes in ID handling. - Refactored error handling in the runtime supervisor to manage scroll creation more effectively. - Enhanced tests to cover new functionality and ensure robustness in scroll management. 
--- Makefile | 5 +- api/openapi.yaml | 4 +- .../adapters/daemonclient/openapi_client.go | 16 ++-- .../daemonclient/openapi_client_test.go | 18 ++++ .../adapters/http/handlers/scroll_handler.go | 6 -- .../core/services/runtime_materialization.go | 14 ++- .../druid/core/services/runtime_supervisor.go | 55 +++++------- .../core/services/runtime_supervisor_test.go | 78 +++++++++++++++++ go.mod | 2 +- internal/api/generated.go | 85 ++++++++++--------- internal/runtime/docker/backend.go | 13 +++ internal/runtime/docker/state_store.go | 76 +++++++++++------ internal/runtime/docker/state_store_test.go | 45 ++++++++++ 13 files changed, 290 insertions(+), 127 deletions(-) diff --git a/Makefile b/Makefile index 1091bda3..e3c230e9 100644 --- a/Makefile +++ b/Makefile @@ -35,8 +35,9 @@ k3d-build-pull-image: ## Build the unified Druid runtime image and import it int build-x86-docker: docker run -e GOOS=linux -e GOARCH=amd64 -it --rm -v ./:/app -w /app --entrypoint=/bin/bash docker.elastic.co/beats-dev/golang-crossbuild:1.22.5-main -c 'CGO_ENABLED=1 go build -ldflags "-X github.com/highcard-dev/daemon/internal.Version=$(VERSION)" -o ./bin/x86/druid' -install: ## Install Daemon - cp ./bin/druid /usr/local/bin/druid +install: build ## Build and install Druid binaries + install -m 0755 ./bin/druid /usr/local/bin/druid + install -m 0755 ./bin/druid-coldstarter /usr/local/bin/druid-coldstarter generate-md-docs: go run ./docs_md/main.go diff --git a/api/openapi.yaml b/api/openapi.yaml index 230155bd..9991ccff 100644 --- a/api/openapi.yaml +++ b/api/openapi.yaml @@ -76,11 +76,11 @@ components: properties: id: type: string - description: Deprecated alias for name. Optional local runtime scroll id/name. + description: Deprecated alias for name. Optional stable runtime id/name. If omitted, the daemon generates an id. example: jobs name: type: string - description: Optional local runtime scroll id/name. If omitted, the daemon derives it from scroll.yaml name. 
+ description: Optional stable runtime id/name. If omitted, the daemon generates an id; the display name still comes from scroll.yaml. example: jobs artifact: type: string diff --git a/apps/druid/adapters/daemonclient/openapi_client.go b/apps/druid/adapters/daemonclient/openapi_client.go index 51e0b88d..711548f7 100644 --- a/apps/druid/adapters/daemonclient/openapi_client.go +++ b/apps/druid/adapters/daemonclient/openapi_client.go @@ -4,19 +4,19 @@ import ( "bytes" "context" "encoding/json" - "errors" "fmt" "io" "net" "net/http" "net/url" "strings" + "time" "github.com/highcard-dev/daemon/internal/api" "github.com/highcard-dev/daemon/internal/utils" ) -var ErrMaterializationUnsupported = errors.New("daemon materialization unsupported") +const daemonRequestTimeout = 5 * time.Second type OpenAPIClient struct { client *api.ClientWithResponses @@ -31,21 +31,22 @@ func NewOpenAPIClient(daemonSocket string) (*OpenAPIClient, error) { func NewOpenAPIClientForTarget(daemonSocket string, daemonURL string) (*OpenAPIClient, error) { if daemonURL != "" { server := strings.TrimRight(daemonURL, "/") - client, err := api.NewClientWithResponses(server) + httpClient := &http.Client{Timeout: daemonRequestTimeout} + client, err := api.NewClientWithResponses(server, api.WithHTTPClient(httpClient)) if err != nil { return nil, err } - return &OpenAPIClient{client: client, server: server, httpClient: http.DefaultClient}, nil + return &OpenAPIClient{client: client, server: server, httpClient: httpClient}, nil } if daemonSocket == "" { daemonSocket = utils.DefaultRuntimeSocketPath() } transport := &http.Transport{ DialContext: func(ctx context.Context, network string, addr string) (net.Conn, error) { - return (&net.Dialer{}).DialContext(ctx, "unix", daemonSocket) + return (&net.Dialer{Timeout: daemonRequestTimeout}).DialContext(ctx, "unix", daemonSocket) }, } - httpClient := &http.Client{Transport: transport} + httpClient := &http.Client{Transport: transport, Timeout: daemonRequestTimeout} 
client, err := api.NewClientWithResponses("http://druid", api.WithHTTPClient(httpClient)) if err != nil { return nil, err @@ -69,9 +70,6 @@ func (c *OpenAPIClient) CreateScroll(ctx context.Context, name string, artifact if err != nil { return nil, err } - if res.StatusCode() == http.StatusNotImplemented { - return nil, ErrMaterializationUnsupported - } if err := ensureStatus(res.StatusCode(), res.Body); err != nil { return nil, err } diff --git a/apps/druid/adapters/daemonclient/openapi_client_test.go b/apps/druid/adapters/daemonclient/openapi_client_test.go index d94945b3..8d7afa3b 100644 --- a/apps/druid/adapters/daemonclient/openapi_client_test.go +++ b/apps/druid/adapters/daemonclient/openapi_client_test.go @@ -9,6 +9,24 @@ import ( "github.com/highcard-dev/daemon/internal/api" ) +func TestOpenAPIClientHasDaemonTimeout(t *testing.T) { + socketClient, err := NewOpenAPIClientForTarget("/tmp/druid-test.sock", "") + if err != nil { + t.Fatal(err) + } + if socketClient.httpClient.Timeout != daemonRequestTimeout { + t.Fatalf("socket timeout = %s, want %s", socketClient.httpClient.Timeout, daemonRequestTimeout) + } + + urlClient, err := NewOpenAPIClientForTarget("", "http://127.0.0.1:1") + if err != nil { + t.Fatal(err) + } + if urlClient.httpClient.Timeout != daemonRequestTimeout { + t.Fatalf("url timeout = %s, want %s", urlClient.httpClient.Timeout, daemonRequestTimeout) + } +} + func TestCreateScrollDoesNotSendStart(t *testing.T) { var got map[string]interface{} server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { diff --git a/apps/druid/adapters/http/handlers/scroll_handler.go b/apps/druid/adapters/http/handlers/scroll_handler.go index 11f7d81e..27af3ee8 100644 --- a/apps/druid/adapters/http/handlers/scroll_handler.go +++ b/apps/druid/adapters/http/handlers/scroll_handler.go @@ -79,9 +79,6 @@ func (h *ScrollHandler) CreateScroll(c *fiber.Ctx) error { if errors.Is(err, domain.ErrRuntimeScrollAlreadyExists) { return 
fiber.NewError(fiber.StatusConflict, err.Error()) } - if errors.Is(err, appservices.ErrRuntimeMaterializationUnsupported) { - return fiber.NewError(fiber.StatusNotImplemented, err.Error()) - } return err } return c.Status(fiber.StatusCreated).JSON(runtimeScroll) @@ -108,9 +105,6 @@ func (h *ScrollHandler) EnsureScroll(c *fiber.Ctx) error { } runtimeScroll, err := h.supervisor.EnsureWithOwner(request.Artifact, name, ownerID, namespace, registryCredentials(request.RegistryCredentials)) if err != nil { - if errors.Is(err, appservices.ErrRuntimeMaterializationUnsupported) { - return fiber.NewError(fiber.StatusNotImplemented, err.Error()) - } return err } return c.JSON(runtimeScroll) diff --git a/apps/druid/core/services/runtime_materialization.go b/apps/druid/core/services/runtime_materialization.go index 1ccd59e4..11f5fc4d 100644 --- a/apps/druid/core/services/runtime_materialization.go +++ b/apps/druid/core/services/runtime_materialization.go @@ -9,23 +9,19 @@ import ( "github.com/highcard-dev/daemon/internal/core/domain" "github.com/highcard-dev/daemon/internal/core/ports" - coreservices "github.com/highcard-dev/daemon/internal/core/services" "github.com/highcard-dev/daemon/internal/core/services/registry" "github.com/highcard-dev/daemon/internal/utils/logger" "go.uber.org/zap" ) -var ErrRuntimeMaterializationUnsupported = errors.New("runtime backend does not support daemon materialization") - -func (s *RuntimeSupervisor) materializeNewScroll(ctx context.Context, runtimeService ports.RuntimeBackendInterface, artifact string, name string, namespace string, registryCredentials []domain.RegistryCredential) (*ports.RuntimeMaterialization, error) { - id := coreservices.RuntimeScrollIDFromName(name) - if id == "" { - return nil, ErrRuntimeMaterializationUnsupported - } - return s.runPullWorker(ctx, runtimeService, ports.RuntimeWorkerModeCreate, id, artifact, runtimeService.RootRef(id, namespace), registryCredentials) +func (s *RuntimeSupervisor) materializeNewScroll(ctx 
context.Context, runtimeService ports.RuntimeBackendInterface, artifact string, runtimeID string, namespace string, registryCredentials []domain.RegistryCredential) (*ports.RuntimeMaterialization, error) { + return s.runPullWorker(ctx, runtimeService, ports.RuntimeWorkerModeCreate, runtimeID, artifact, runtimeService.RootRef(runtimeID, namespace), registryCredentials) } func (s *RuntimeSupervisor) runPullWorker(ctx context.Context, runtimeService ports.RuntimeBackendInterface, mode ports.RuntimeWorkerMode, runtimeID string, artifact string, root string, registryCredentials []domain.RegistryCredential) (*ports.RuntimeMaterialization, error) { + if s.workerCallbacks == nil || s.workerCallbackURL == "" { + return nil, fmt.Errorf("daemon materialization requires --worker-callback-url and --worker-callback-listen") + } token, resultCh, err := s.workerCallbacks.Register(runtimeID) if err != nil { return nil, err diff --git a/apps/druid/core/services/runtime_supervisor.go b/apps/druid/core/services/runtime_supervisor.go index c97a0cdf..b0a9e78a 100644 --- a/apps/druid/core/services/runtime_supervisor.go +++ b/apps/druid/core/services/runtime_supervisor.go @@ -7,6 +7,7 @@ import ( "strings" "sync" + "github.com/google/uuid" "github.com/highcard-dev/daemon/internal/core/domain" "github.com/highcard-dev/daemon/internal/core/ports" coreservices "github.com/highcard-dev/daemon/internal/core/services" @@ -83,35 +84,32 @@ func (s *RuntimeSupervisor) Create(artifact string, name string, registryCredent func (s *RuntimeSupervisor) CreateWithOwner(artifact string, name string, ownerID string, namespace string, registryCredentials []domain.RegistryCredential) (*domain.RuntimeScroll, error) { id := coreservices.RuntimeScrollIDFromName(name) - var placeholder *domain.RuntimeScroll - if id != "" { - if _, err := s.store.GetScroll(id); err == nil { - return nil, fmt.Errorf("%w: %s", domain.ErrRuntimeScrollAlreadyExists, id) - } else if !errors.Is(err, domain.ErrRuntimeScrollNotFound) { 
- return nil, err - } - placeholder = &domain.RuntimeScroll{ - ID: id, - OwnerID: ownerID, - Artifact: artifact, - Root: s.runtimeBackend.RootRef(id, namespace), - Status: domain.RuntimeScrollStatusCreated, - Commands: map[string]domain.LockStatus{}, - } - if err := s.store.CreateScroll(placeholder); err != nil { - return nil, err - } + if id == "" { + id = uuid.NewString() + } + if _, err := s.store.GetScroll(id); err == nil { + return nil, fmt.Errorf("%w: %s", domain.ErrRuntimeScrollAlreadyExists, id) + } else if !errors.Is(err, domain.ErrRuntimeScrollNotFound) { + return nil, err + } + placeholder := &domain.RuntimeScroll{ + ID: id, + OwnerID: ownerID, + Artifact: artifact, + Root: s.runtimeBackend.RootRef(id, namespace), + Status: domain.RuntimeScrollStatusCreated, + Commands: map[string]domain.LockStatus{}, + } + if err := s.store.CreateScroll(placeholder); err != nil { + return nil, err } markPlaceholderError := func(cause error) { - if placeholder == nil { - return - } placeholder.Status = domain.RuntimeScrollStatusError placeholder.LastError = cause.Error() _ = s.store.UpdateScroll(placeholder) } - materialized, err := s.materializeNewScroll(context.Background(), s.runtimeBackend, artifact, name, namespace, registryCredentials) + materialized, err := s.materializeNewScroll(context.Background(), s.runtimeBackend, artifact, id, namespace, registryCredentials) if err != nil { markPlaceholderError(err) return nil, err @@ -119,18 +117,11 @@ func (s *RuntimeSupervisor) CreateWithOwner(artifact string, name string, ownerI if materialized.Artifact != "" { artifact = materialized.Artifact } - if placeholder != nil { - placeholder, err = s.applyMaterializedScroll(placeholder, artifact, materialized) - if err != nil { - return nil, err - } - return placeholder, nil - } - runtimeScroll, err := s.manager.CreateWithDigest(artifact, materialized.ArtifactDigest, name, ownerID, materialized.Root, materialized.ScrollYAML) + placeholder, err = 
s.applyMaterializedScroll(placeholder, artifact, materialized) if err != nil { return nil, err } - return runtimeScroll, nil + return placeholder, nil } func (s *RuntimeSupervisor) Ensure(artifact string, name string, registryCredentials []domain.RegistryCredential) (*domain.RuntimeScroll, error) { @@ -161,7 +152,7 @@ func (s *RuntimeSupervisor) EnsureWithOwner(artifact string, name string, ownerI if artifact == "" { artifact = runtimeScroll.Artifact } - materialized, err := s.materializeNewScroll(context.Background(), s.runtimeBackend, artifact, name, namespace, registryCredentials) + materialized, err := s.materializeNewScroll(context.Background(), s.runtimeBackend, artifact, id, namespace, registryCredentials) if err != nil { runtimeScroll.Status = domain.RuntimeScrollStatusError runtimeScroll.LastError = err.Error() diff --git a/apps/druid/core/services/runtime_supervisor_test.go b/apps/druid/core/services/runtime_supervisor_test.go index db7eabaa..4c178d25 100644 --- a/apps/druid/core/services/runtime_supervisor_test.go +++ b/apps/druid/core/services/runtime_supervisor_test.go @@ -218,6 +218,38 @@ func TestRuntimeSupervisorCreateCanCreate(t *testing.T) { } } +func TestRuntimeSupervisorCreateGeneratesIDWhenNameOmitted(t *testing.T) { + store := newTestStateStore(t) + callbacks := NewWorkerCallbackManager() + backend := &fakeWorkerBackend{callbacks: callbacks, scrollYAML: cachedScrollYAML("start"), digest: "sha256:generated"} + supervisor := NewRuntimeSupervisor( + store, + coreservices.NewRuntimeScrollManager(store), + backend, + ) + supervisor.SetWorkerCallbacks(callbacks, "http://druid-cli:8083") + + runtimeScroll, err := supervisor.Create("registry.local/lab:1.0", "", nil) + if err != nil { + t.Fatal(err) + } + if runtimeScroll.ID == "" || runtimeScroll.ID == "cached" { + t.Fatalf("id = %q, want generated runtime id independent from scroll.yaml name", runtimeScroll.ID) + } + if runtimeScroll.ScrollName != "cached" { + t.Fatalf("scroll name = %q, want 
cached", runtimeScroll.ScrollName) + } + if backend.action.RuntimeID != runtimeScroll.ID || backend.action.RootRef != backend.RootRef(runtimeScroll.ID, "") { + t.Fatalf("worker action = %#v scroll = %#v", backend.action, runtimeScroll) + } + if backend.action.Mode != ports.RuntimeWorkerModeCreate || backend.action.CallbackToken == "" { + t.Fatalf("worker action = %#v", backend.action) + } + if runtimeScroll.ArtifactDigest != "sha256:generated" || runtimeScroll.Status != domain.RuntimeScrollStatusCreated { + t.Fatalf("runtime scroll = %#v", runtimeScroll) + } +} + func TestRuntimeSupervisorCreateUsesPullWorkerBeforeStateMutation(t *testing.T) { store := newTestStateStore(t) callbacks := NewWorkerCallbackManager() @@ -250,6 +282,52 @@ func TestRuntimeSupervisorCreateUsesPullWorkerBeforeStateMutation(t *testing.T) } } +func TestRuntimeSupervisorCreateWorkerFailureLeavesGeneratedPlaceholder(t *testing.T) { + store := newTestStateStore(t) + callbacks := NewWorkerCallbackManager() + backend := &fakeWorkerBackend{callbacks: callbacks, workerErr: errors.New("pull image failed")} + supervisor := NewRuntimeSupervisor( + store, + coreservices.NewRuntimeScrollManager(store), + backend, + ) + supervisor.SetWorkerCallbacks(callbacks, "http://druid-cli:8083") + + if _, err := supervisor.Create("registry.local/missing:1.0", "", nil); err == nil { + t.Fatal("Create error = nil, want worker error") + } + scrolls, err := store.ListScrolls() + if err != nil { + t.Fatal(err) + } + if len(scrolls) != 1 { + t.Fatalf("scrolls = %#v, want one failed placeholder", scrolls) + } + if scrolls[0].Status != domain.RuntimeScrollStatusError || !strings.Contains(scrolls[0].LastError, "pull image failed") { + t.Fatalf("placeholder = %#v, want remembered worker failure", scrolls[0]) + } +} + +func TestRuntimeSupervisorCreateRequiresWorkerCallbackConfig(t *testing.T) { + store := newTestStateStore(t) + supervisor := NewRuntimeSupervisor( + store, + coreservices.NewRuntimeScrollManager(store), + 
&fakeWorkerBackend{scrollYAML: cachedScrollYAML("start")}, + ) + + if _, err := supervisor.Create("registry.local/lab:1.0", "missing-callbacks", nil); err == nil || !strings.Contains(err.Error(), "daemon materialization requires --worker-callback-url and --worker-callback-listen") { + t.Fatalf("Create error = %v, want explicit callback config error", err) + } + runtimeScroll, err := store.GetScroll("missing-callbacks") + if err != nil { + t.Fatal(err) + } + if runtimeScroll.Status != domain.RuntimeScrollStatusError || !strings.Contains(runtimeScroll.LastError, "--worker-callback-url") { + t.Fatalf("runtime scroll = %#v, want callback config error", runtimeScroll) + } +} + func TestRuntimeSupervisorCreateUsesRequestedNamespaceForRoot(t *testing.T) { store := newTestStateStore(t) callbacks := NewWorkerCallbackManager() diff --git a/go.mod b/go.mod index c423b0d1..9c434b57 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/Masterminds/semver/v3 v3.2.1 github.com/gofiber/contrib/websocket v1.3.4 github.com/gofiber/fiber/v2 v2.52.9 + github.com/google/uuid v1.6.0 github.com/opencontainers/image-spec v1.1.1 github.com/spf13/cobra v1.9.1 github.com/spf13/viper v1.20.1 @@ -22,7 +23,6 @@ require ( github.com/fsnotify/fsnotify v1.9.0 github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/swag v0.23.1 // indirect - github.com/google/uuid v1.6.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/joho/godotenv v1.5.1 github.com/josharian/intern v1.0.0 // indirect diff --git a/internal/api/generated.go b/internal/api/generated.go index cdece059..8247a09f 100644 --- a/internal/api/generated.go +++ b/internal/api/generated.go @@ -44,10 +44,10 @@ type CreateScrollRequest struct { // Artifact OCI artifact reference or local scroll path Artifact string `json:"artifact"` - // Id Deprecated alias for name. Optional local runtime scroll id/name. + // Id Deprecated alias for name. Optional stable runtime id/name. 
If omitted, the daemon generates an id. Id *string `json:"id,omitempty"` - // Name Optional local runtime scroll id/name. If omitted, the daemon derives it from scroll.yaml name. + // Name Optional stable runtime id/name. If omitted, the daemon generates an id; the display name still comes from scroll.yaml. Name *string `json:"name,omitempty"` // Namespace Kubernetes namespace for runtime resources. Ignored by non-Kubernetes backends. @@ -2950,46 +2950,47 @@ func RegisterHandlersWithOptions(router fiber.Router, si ServerInterface, option // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+xaS3Mcue3/Kqz+/48jjZx1ctBNK2c32vWWFckpH3ZdKg6J6aHFJimSLWmimu+e4qun", - "H+x5WU4k115c1hAAgR8AEkDzqSCyUlKAsKY4fSoMWUCF/X/PlOLLK1lbJsoruKvBWPez0lKBtgw8ETaG", - "laJK7MxC5f/z/xrmxWnxf9O1+GmUPb2qhWUVONFw1vAXq0lhlwqK0wJrjZfFajUpNNzVTAMtTn/vbPW5", - "oZWzL0A887msKizotcW2Nr9h5dWjlFkmBeaXLbWtriEnQAO2cE205HzcYG3ZHBO/QsEQzZTboDgtPpxf", - "oLSKNMxBgyCApEZcEsyR8YKRwnZRTAp4xJXiwdrAY46prhk9LsupBWP9P6fun6LR1VjNROl0ZXSowDtQ", - "Ggi2QBHmDBs0lxoJXMEx+qACClEVHTyQVGJ06sk6an2RM5Pb2VFmjN9pA3QxR7Ji1gKdILsARDFUUiAK", - "mt2DQcyiuZZVZDte4oqj/TQzCpOMer/WM9ACLBjUUHl4kqIajKw1AXOMLkohNVA0WyIhxVGLdYbJLQhq", - "jnO7ywcB+ibnlxjvyFMgRlFtgPrdSW2srEAfzTFhokTapQTCtV1Izf6NHX92Lw0lM1Yvb4gGCsIyzPdI", - "v8h83vBuT70U9Lm8ewccLNCQN8OECYgMTDA+Td3S2rE0SBpa3FOHOZIoIKfR34Wp9T6JPNAuLd5QVkbu", - "kRQczY8/w/NlhOc/AHO7uAKjpDAwjINK0oxHzmutQVi08NwoBBvytO2jSN7m7FdalhqMGYq9jCtIgSYg", - "LC6Dn7nE1CHsFPO4ugNuLnWFbXFazLnE7hao8COr6qo4fXNyMikqJsJfJ40Koq5moGN6aXtDsc3Y9mkB", - "on36elqfds2OjvHIRUUxKUTNOZ45czv35khueohyfsj4deCLhRzJNYWNeZA6n3G1AT2SdT3lvPwWQ0tw", - "VuWQGWcxvD4k5xx2poxlBYU5rrktTueYG5g8X5a4Lb1vW+rMpOSAxX4pFHG4lNpeN8d21/KZrAXN7TPx", - "oN8wlcXErynZUZEJC2WI4lsAdcbZPXzUeD5nJCuDY2NvMLHsntnlDfaisoG8+1E9rpHSkgCt9QifllYS", - "yfP+f7yZLW2Aq9GPCfu3t2vdWjuFAz8ryQ7QaMEdF/faK/HI280yH5ig8iGv0z7W9YIv5WKDbfRAC9BJ", - 
"jLC18Q1CGyK232IMwhYerTsK+Kb43O+Wd4rfjK9uChBVzzgjG9Kh1jx/xm2yn4nyI9YlZKzfrVTZJzu2", - "GX9o7hjgQKzU4z1dLiT7qBjQ94zAuIaJYMzC3eL2Jv0+iOGOAr3tNoTxWGm98bohoR02e7fBxLfBdK+T", - "dKQW9kczaB0ct7EmHR6XUo7coyGon33cMClC27khPNY9i3CF1+8Jq2JS6FoIR+aopFL+t2D5pOlrPmeQ", - "qxXdE+1cM9REQsSta0ujece5nb1z0RfC7r0sNw5TWl4Yy8AG4N4WPuNIrZldXjtnxVICsAZ9VtvF+q+f", - "Ei6/fProAW3Xs798+oisvAURhgfMF0R2iZSW94yC9mg48e5S8+LWoC6sVV4zx5/27Iq/Xkhtj1wVQtFd", - "DXqZNpMafYLZtSS3YBGRQgBJpTtzjJ64SHdF2GK9M1bsV3C9jEsfMZduYyKFDfm86hv5TteMovP3F4jj", - "WpAFGIQFRRUWuASDPCcToI98F0jT5AUrxRkJLcUEcXYLf4gSV4Dc2QPaTBDFFs+wATPxAh9gltaO//Dq", - "MutbnUaBYlK41aDWyfGb4xOfywoEVqw4LX7wP7nS2i68Q6dYsen9m2nopdwv8TbqWvgz2NSRdLquwgsP", - "lfcFDYShqfP+8lWu7+38Zn85OUlIxhu/BcH0i3FbpTHntpOj1zp6V3V1DhQ+tv968sN/cePrcHWgWuB7", - "zEJ/5vOpriqslxHOPo4Wl8b3QcETkyLgXXx2rMlNIXJMy09d+N8zY68jzVeCv8/5HS/B4SxggM1VZ/ho", - "erg49XvzyTY0caWNjbvJTQaI9ry4CIcyGPujpMtnC4TcSHrVvQHcLb4a+OHNs6nQg38b3ChdiV3UgyE9", - "3DfDPgzJKfjBni+Esh5pD/6+kUdys8WdPHLyP/NIQK3vkWBIf1IPj8zYcLXIONLnyzAhMnu764nRVTjn", - "Xf0zdFeYHDfuUljjCixot8VTuEPj15J4hfpapwv0pAVav1D6/A2d0J16b3dCqgFXk+LtydvxKWwkF9Ki", - "ue94u14L2+6VR5P8Mf4z2NeJ/J7h/7WIu3v0K48tlwdTV5fVavzs+tGvf2OXPP95uG1G+tLOxgAzcqwx", - "Ibun4iOQupVg0WsHeTx14NOn+L/VuPevahF0jl+xv0UETLJCSLPhK0nvf4WutZ+UX5nmV7Xo34VrZA5y", - "vpizcrSIbk7f80D3As/gfrM+cMQl1mbdaUaDh4enypEdiqmRHMxOqAbKF4jrHsO4IebJMHQLy/AReD19", - "HEIf0DsyRCqgiKxBOQB8LssdgH/vqF5ZQdGZc2UwdzYdgjcPWCSs458J8nGkldR2B6gvZajJXxzW+3T1", - "rW+Ke3f2yAGVhhvPXuV1pB+UMU2k7OLMNe0ry57Bs7vcTZGs83jmMkf1KA4C/K6GGrZj/U9P9h3C7A0b", - "hTiF912Lag3zXQRl+/GkwVi5aQpzFQj+bGW+dZ8bcN65l0mOOyi3Wl/d8l73z4XjXCzSvh7X5946vzR3", - "j/U9HZ9fgjbM2PiQTOqj8GoaKAqvC5BufDMMgvAdelsITK1/TrDDpdZ5fvDqS5XuY4pdqpXAgBJemXrR", - "4hmH9HAz+aZhOMBHzXuvfJJeu+XvdOx3HZ4xbs4PT/Qs8zxjpdoEtFTfLc7+fcM2nKXqF9QPUt9yialB", - "DwvGASkN/gWMKP1H6C1uiM9zEohjX8fPLi+K+FKqmBYOqSh08Fk/KBU+oFcgrP/oEWc+CPzd6SgbHzVz", - "pqdhf4iM1YArZ4rj1mA1g3vM19y++xvyxrIuFmVrZdaMoTAbcmafHvjd/UMDs5bwADPjKTNSXP+FmAjv", - 
"TpgUXkDT9EQB/swZ8oav1IgsgNyaLGP8zjxk/a3mlh3FuEhhkrM+RcJQxLvwVICzOZAl4Xn2GD5D7p9c", - "AD5gSxbJZxTugUvlIyG+8074ObKMjDMhpA2ozZ04TAiYlvW4WTfF6vPqPwEAAP//M7dIxGs1AAA=", + "H4sIAAAAAAAC/+xa3XMctw3/VzjbPp50cpP2QX1S5CZV4oxVyR0/JB4Nj8Tt0eKSFMmVdNXc/97h195+", + "cO/Lcit58uKxjgAI/ACQAJZPBZGVkgKENcXpU2HIAirs/3umFF9eydoyUV7BXQ3Gup+Vlgq0ZeCJsDGs", + "FFViZxYq/58/a5gXp8Wfpmvx0yh7elULyypwouGs4S9Wk8IuFRSnBdYaL4vValJouKuZBlqc/tbZ6lND", + "K2efgXjmc1lVWNBri21tfsXKq0cps0wKzC9baltdQ06ABmzhmmjJ+bjB2rI5Jn6FgiGaKbdBcVq8P79A", + "aRVpmIMGQQBJjbgkmCPjBSOF7aKYFPCIK8WDtYHHHFNdM3pcllMLxvp/Tt0/RaOrsZqJ0unK6FCBt6A0", + "EGyBIswZNmguNRK4gmP0XgUUkLF4xgHp4ALE6DQQXMyRrJi1QCfILgBRDJUUqAQBGlswCAvE6HFH8c9y", + "ZnK6OYkZeJ5Hhb+HNWYUx0tvHTKWcY6IrMCguZZVRPp4iSu+u8ZGYZJR+5d6BlqA27+h8sAm/TUYWWsC", + "5hhdlEJqoGi2REKKoxbrDJNbENQc53aXDwL0Tc6jMVOQp0CMotoA9buT2lhZgT6aY8JEibRLJoRru5Ca", + "/Qc7/uxeGkpmrF7eEA0UhGWY75G4kfm84d2etCldchn7FjhYoCHjhqkWEBmYYHyCu6W1Y2mQNLS4pw5z", + "JFFATqN/CFPrfY6AgXZp8YayMnKPJO9o3vwRni8jPP8JmNvFFRglhYFhHFSSZjxyXmsNwqKF50Yh2JCn", + "bR9F8jZnv9Ky1GDMUOxlXEEKNAFhcRn8zCWmDmGnmMfVHXBzqStsi9NiziV290eFH1lVV8Xpm5OTSVEx", + "Ef46aVQQdTUDHdNL2xuKbca2jwsQ7bPZ0/q0a3Z0jEcuKopJIWrO3Vnfu3FHctNDlPNDxq8DXyzkSK4p", + "bMyD1PmMqw3okazrKefltxhagrMqh8w4i+H1PjnnsDNlLCsozHHNbXE6x9zA5PmyxG3pfdtSZyYlByz2", + "S6GIw6XU9ro5truWz2QtaG6fiQf9hqksJn5NyY6KTFgoQxTfAqgzzu7hg8bzOSNZGRwbe4OJZffMLm+w", + "F5UN5N2P6nGNlJYEaK1H+LS0kkie9//jzWxpA1yNfkzYv32/1q21Uzjws5LsAI0W3HFxr70Sj7zdLPOB", + "CSof8jrtY10v+FIuNthGD7QAncQIWxvfILQhYvvNySBs4dG6o4Bvis/9bnmn+M346qYAUfWMM7IhHWrN", + "82fcJvuZKD9gXULG+t1KlX2yY5vxh+aOAQ7ESj3eDeZCso+KAX3PCIxrmAjGLNwtbm/S74MY7ijQ225D", + "GI+V1huvGxIaabN3A018A033OklHamF/NIPWwXEba9LhcSnlyD0agvrZBxWTInScG8Jj3bMIV3j9lrAq", + "JoWuhXBkjkoq5X8Llk+avuZTBrla0T3RzjVDTSRE3Lq2NJp3nNvZOxd9IezeyXLjGKblhbEMbADubeEz", + "jtSa2eW1c1YsJQBr0Ge1Xaz/+jHh8vPHDx7Qdj3788cPyMpbEGFuwHxBZJdIaXnPKGiPhhPvLjUvbg3q", + "wlrlNXP8ac+u+OuF1PbIVSEU3dWgl2kzqdFHmF1LcgsWESkEkFS6M8foiYt0V4Qt1jtjxX4B18u49BFz", + 
"6TYmUtiQz6u+kW91zSg6f3eBOK4FWfhJCkUVFrgEgzwnE6CPfBdI05wKK8UZCS3FBHF2C7+L0o9bQN+D", + "NhNEscUzbMBMvMAHmKW149+9usz6VqdRoJgUbjWodXL85vjE57ICgRUrTovv/E+utLYL79ApVmx6/2Ya", + "ein3S7yNuhb+BDZ1JJ2uq/DCQ+V9QQNhaOq8v3yV63s7v9lfTk4SkvHGb0Ew/WzcVmlAuu3k6LWO3lVd", + "nQOFj+2/nnz3P9z4OlwdqBb4HrPQn/l8qqsK62WEs4+jxaXxfVDwxKQIeBefHGtyU4gc0/JTF/53zNjr", + "SPOF4O9zfsdLcDgLGGCTBhrJkC4uTv1mrmIaOxI0caWNjbvJTQaI9qS5CIcyGPuDpMtnC4TcMHvVvQHc", + "Lb4a+OHNs6nQg38b3ChdiV3UgyE93DfDPgzJKfjBni+Esh5pD/6+kkdys8WdPHLyf/NIQK3vkWBIzyMI", + "Hpmx4WqRcdTPl2FCZPZ21xOjq3DOu/pn6K4wOW7cpbDGFVjQbouncIfG7yzxCvW1ThfoSQu0fqH06Ss6", + "oTv13u6EVAOuJsX3J9+PT2EjuZAWzX3H2/Va2HavPJrkj/GfwL5O5PcM/y9F3N2jX3hsuTyYurqsVuNn", + "1w9+/Su75PnPw20z0pd2NgaYkWONCdk9FR+B1K0Ei147yOOpA58+xf+txr1/VYugc/z+/TUiYJIVQpoN", + "X0l6/zt0rf2k/MI0v6pF/y5cI3OQ88WclaNFdHP6nge6F3gG95v1gSMusTbrTjMaPDw8VY7sUEyN5GB2", + "QjVQvkBc9xjGDTFPhqFbWIaPwOvp4xD6gN6RIVIBRWQNygHgc1nuAPw7R/XKCorOnCuDubPpELx5wCJh", + "Hf9MkI8jraS2O0B9KUNN/uKw3qerb31T3LuzRw6oNNx49iqvI/2gjGkiZRdnrmlfWfYMHuzlbopknccz", + "lzmqR3EQ4Hc11LAd6395sm8QZm/YKMQpvO9aVGuY7yIo248nDcbKTVOYq0DwRyvztfvcgPPOvUxy3EG5", + "1frqlve6f2gc52KR9vW4PvdK+qW5e6zv6fj8ErRhxsaHZFIfhffWQFF4XYB045thEITv0NtCYGr9c4Id", + "LrXO84NXX6p0H1PsUq0EBpTwytSL4RF1fLiZfNMwHOCj5r1XPkmv3fI3Ova7Ds8YN+eHJ3qWeZ6xUm0C", + "WqpvFmf/vmEbzlL1C+oHqW+5xNSghwXjgJQG/wJGlP4j9BY3xOc5CcSxr+NnlxdFfClVTAuHVBQ6+Kwf", + "lAof0CsQ1n/0iDMfBP7udJSNj5o509OwP0TGasCVM8Vxa7CawT3ma27f/Q15Y1kXi7K1MmvGUJgNObNP", + "D/zu/qGBWUt4gJnxlBkprv9CTIR3J0wKL6BpeqIAf+YMecNXakQWQG5NljF+Zx6y/lpzy45iXKQwyVmf", + "ImEo4m14KsDZHMiS8Dx7DJ8h948uAB+wJYvkMwr3wKXykRDfeSf8HFlGxpkQ0gbU5k4cJgRMy3rcrJti", + "9Wn13wAAAP//E5YRdaU1AAA=", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/internal/runtime/docker/backend.go b/internal/runtime/docker/backend.go index dd1e4513..118a2c9a 100644 --- a/internal/runtime/docker/backend.go +++ b/internal/runtime/docker/backend.go @@ -474,6 +474,12 @@ func (b *Backend) emptyRoot(ctx 
context.Context, root string) error { }) } +func (b *Backend) prepareWritableRoot(ctx context.Context, root string) error { + return b.withHelperContainer(ctx, root, func(containerID string) error { + return b.runContainerCommand(ctx, containerID, []string{"sh", "-c", "mkdir -p /scroll/data /scroll/.druid && chmod -R a+rwX /scroll"}) + }) +} + func (b *Backend) runWorkerRootCommand(ctx context.Context, root string, command []string, registryCredentials []domain.RegistryCredential) error { if b.config.WorkerImage == "" { return fmt.Errorf("docker worker image is required; set --docker-worker-image or DRUID_DOCKER_WORKER_IMAGE") @@ -485,6 +491,9 @@ func (b *Backend) runWorkerRootCommand(ctx context.Context, root string, command if err := b.pullImage(ctx, b.config.WorkerImage); err != nil { return err } + if err := b.prepareWritableRoot(ctx, root); err != nil { + return err + } registryConfig, err := json.Marshal(struct { Registries []domain.RegistryCredential `json:"registries"` }{Registries: registryCredentials}) @@ -621,6 +630,7 @@ func (b *Backend) withHelperContainer(ctx context.Context, root string, fn func( name := fmt.Sprintf("druid-helper-%s-%d", rootHash(root), time.Now().UnixNano()) created, err := b.client.ContainerCreate(ctx, &container.Config{ Image: b.config.WorkerImage, + User: "0", Entrypoint: []string{"/bin/sh", "-c"}, Cmd: []string{"sleep 300"}, Labels: map[string]string{ @@ -678,6 +688,9 @@ func (b *Backend) SpawnPullWorker(ctx context.Context, action ports.RuntimeWorke if err := b.pullImage(ctx, b.config.WorkerImage); err != nil { return err } + if err := b.prepareWritableRoot(ctx, root); err != nil { + return err + } registryConfig, err := json.Marshal(struct { Registries []domain.RegistryCredential `json:"registries"` }{Registries: action.RegistryCredentials}) diff --git a/internal/runtime/docker/state_store.go b/internal/runtime/docker/state_store.go index 6f1e5dbc..3b7b72c0 100644 --- a/internal/runtime/docker/state_store.go +++ 
b/internal/runtime/docker/state_store.go @@ -19,6 +19,24 @@ type StateStore struct { dbPath string } +const scrollsTableSQL = ` + CREATE TABLE IF NOT EXISTS scrolls ( + id TEXT PRIMARY KEY, + owner_id TEXT NOT NULL DEFAULT '', + artifact TEXT NOT NULL, + artifact_digest TEXT NOT NULL DEFAULT '', + root TEXT NOT NULL, + scroll_name TEXT NOT NULL, + scroll_yaml TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL, + last_error TEXT NOT NULL DEFAULT '', + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + commands_json TEXT NOT NULL DEFAULT '{}', + routing_json TEXT NOT NULL DEFAULT '[]' + ) +` + func NewStateStore(stateDir string) (*StateStore, error) { if stateDir == "" { defaultStateDir, err := utils.DefaultRuntimeStateDir() @@ -194,23 +212,11 @@ func (s *StateStore) open() (*sql.DB, error) { db.Close() return nil, err } - if _, err := db.Exec(` - CREATE TABLE IF NOT EXISTS scrolls ( - id TEXT PRIMARY KEY, - owner_id TEXT NOT NULL DEFAULT '', - artifact TEXT NOT NULL, - artifact_digest TEXT NOT NULL DEFAULT '', - root TEXT NOT NULL, - scroll_name TEXT NOT NULL, - scroll_yaml TEXT NOT NULL DEFAULT '', - status TEXT NOT NULL, - last_error TEXT NOT NULL DEFAULT '', - created_at TEXT NOT NULL, - updated_at TEXT NOT NULL, - commands_json TEXT NOT NULL DEFAULT '{}', - routing_json TEXT NOT NULL DEFAULT '[]' - ) - `); err != nil { + if _, err := db.Exec(scrollsTableSQL); err != nil { + db.Close() + return nil, err + } + if err := dropLegacyScrollsTable(db); err != nil { db.Close() return nil, err } @@ -237,6 +243,21 @@ func (s *StateStore) open() (*sql.DB, error) { return db, nil } +func dropLegacyScrollsTable(db *sql.DB) error { + columns, err := tableColumns(db, "scrolls") + if err != nil { + return err + } + if !columns["scroll_root"] && !columns["data_root"] { + return nil + } + if _, err := db.Exec(`DROP TABLE scrolls`); err != nil { + return err + } + _, err = db.Exec(scrollsTableSQL) + return err +} + func ensureColumn(db *sql.DB, table string, column string, 
definition string) error { exists, err := tableHasColumn(db, table, column) if err != nil || exists { @@ -247,11 +268,20 @@ func ensureColumn(db *sql.DB, table string, column string, definition string) er } func tableHasColumn(db *sql.DB, table string, column string) (bool, error) { - rows, err := db.Query(fmt.Sprintf("PRAGMA table_info(%s)", table)) + columns, err := tableColumns(db, table) if err != nil { return false, err } + return columns[column], nil +} + +func tableColumns(db *sql.DB, table string) (map[string]bool, error) { + rows, err := db.Query(fmt.Sprintf("PRAGMA table_info(%s)", table)) + if err != nil { + return nil, err + } defer rows.Close() + columns := map[string]bool{} for rows.Next() { var cid int var name string @@ -260,16 +290,14 @@ func tableHasColumn(db *sql.DB, table string, column string) (bool, error) { var defaultValue sql.NullString var pk int if err := rows.Scan(&cid, &name, &columnType, ¬Null, &defaultValue, &pk); err != nil { - return false, err - } - if name == column { - return true, nil + return nil, err } + columns[name] = true } if err := rows.Err(); err != nil { - return false, err + return nil, err } - return false, nil + return columns, nil } type runtimeScrollScanner interface { diff --git a/internal/runtime/docker/state_store_test.go b/internal/runtime/docker/state_store_test.go index a13e86df..6443a0f9 100644 --- a/internal/runtime/docker/state_store_test.go +++ b/internal/runtime/docker/state_store_test.go @@ -1,10 +1,12 @@ package docker import ( + "database/sql" "path/filepath" "testing" "github.com/highcard-dev/daemon/internal/core/domain" + _ "modernc.org/sqlite" ) func TestStateStorePersistsCommandStatuses(t *testing.T) { @@ -69,3 +71,46 @@ func TestStateStoreUsesSingleRuntimeRoot(t *testing.T) { t.Fatalf("Root = %s, want %s", got, want) } } + +func TestStateStoreDropsLegacyScrollRootTable(t *testing.T) { + stateDir := t.TempDir() + db, err := sql.Open("sqlite", filepath.Join(stateDir, "state.db")) + if err != nil { 
+ t.Fatal(err) + } + _, err = db.Exec(` + CREATE TABLE scrolls ( + id TEXT PRIMARY KEY, + artifact TEXT NOT NULL, + scroll_root TEXT NOT NULL, + data_root TEXT NOT NULL, + scroll_name TEXT NOT NULL, + status TEXT NOT NULL, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL + ); + INSERT INTO scrolls (id, artifact, scroll_root, data_root, scroll_name, status, created_at, updated_at) + VALUES ('old', 'artifact', 'docker-volume://old-root', 'docker-volume://old-data', 'old-scroll', 'created', '2026-05-16T00:00:00Z', '2026-05-16T00:00:00Z'); + `) + if err != nil { + t.Fatal(err) + } + db.Close() + + store, err := NewStateStore(stateDir) + if err != nil { + t.Fatal(err) + } + if _, err := store.GetScroll("old"); err == nil { + t.Fatal("legacy scroll survived schema reset") + } + if err := store.CreateScroll(&domain.RuntimeScroll{ + ID: "new", + Artifact: "artifact", + Root: "docker-volume://new-root", + ScrollName: "new-scroll", + Status: domain.RuntimeScrollStatusCreated, + }); err != nil { + t.Fatal(err) + } +}