Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 5 additions & 4 deletions agent/internal/agent/handlers.go
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ func (a *Agent) ProcessBuild(item agenthttp.WorkQueueItem) error {
}
log.Printf("[build] starting build %s for commit %s (timeout: %d minutes)", Truncate(payload.BuildID, 8), Truncate(buildDetails.Build.CommitSha, 8), timeoutMinutes)

if err := a.Client.UpdateBuildStatus(payload.BuildID, "cloning", ""); err != nil {
if err := a.Client.UpdateBuildStatus(payload.BuildID, "cloning", "", ""); err != nil {
log.Printf("[build] failed to update status to cloning: %v", err)
}

Expand All @@ -165,6 +165,7 @@ func (a *Agent) ProcessBuild(item agenthttp.WorkQueueItem) error {
CloneURL: buildDetails.CloneURL,
CommitSha: buildDetails.Build.CommitSha,
Branch: buildDetails.Build.Branch,
ImageRepository: buildDetails.ImageRepository,
ImageURI: buildDetails.ImageURI,
ServiceID: buildDetails.Build.ServiceID,
ProjectID: buildDetails.Build.ProjectID,
Expand All @@ -174,7 +175,7 @@ func (a *Agent) ProcessBuild(item agenthttp.WorkQueueItem) error {
}

onStatusChange := func(status string) {
if err := a.Client.UpdateBuildStatus(payload.BuildID, status, ""); err != nil {
if err := a.Client.UpdateBuildStatus(payload.BuildID, status, "", buildConfig.ResolvedCommitSha); err != nil {
log.Printf("[build] failed to update status to %s: %v", status, err)
}
}
Expand All @@ -184,14 +185,14 @@ func (a *Agent) ProcessBuild(item agenthttp.WorkQueueItem) error {
err = a.Builder.Build(ctx, buildConfig, checkCancelled, onStatusChange)
if err != nil {
log.Printf("[build] build %s failed: %v", Truncate(payload.BuildID, 8), err)
if updateErr := a.Client.UpdateBuildStatus(payload.BuildID, "failed", err.Error()); updateErr != nil {
if updateErr := a.Client.UpdateBuildStatus(payload.BuildID, "failed", err.Error(), buildConfig.ResolvedCommitSha); updateErr != nil {
log.Printf("[build] failed to update status to failed: %v", updateErr)
}
return err
}

log.Printf("[build] build %s completed successfully", Truncate(payload.BuildID, 8))
if err := a.Client.UpdateBuildStatus(payload.BuildID, "completed", ""); err != nil {
if err := a.Client.UpdateBuildStatus(payload.BuildID, "completed", "", buildConfig.ResolvedCommitSha); err != nil {
log.Printf("[build] failed to update status to completed: %v", err)
}

Expand Down
48 changes: 38 additions & 10 deletions agent/internal/build/build.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,16 +20,18 @@ import (
)

type Config struct {
BuildID string
CloneURL string
CommitSha string
Branch string
ImageURI string
ServiceID string
ProjectID string
RootDir string
Secrets map[string]string
TargetPlatforms []string
BuildID string
CloneURL string
CommitSha string
Branch string
ImageRepository string
ImageURI string
ResolvedCommitSha string
ServiceID string
ProjectID string
RootDir string
Secrets map[string]string
TargetPlatforms []string
}

type LogSender interface {
Expand Down Expand Up @@ -68,6 +70,11 @@ func (b *Builder) Build(ctx context.Context, config *Config, checkCancelled func
return fmt.Errorf("clone failed: %w", err)
}

if config.CommitSha == "HEAD" && config.ImageRepository != "" && config.ResolvedCommitSha != "" {
config.ImageURI = fmt.Sprintf("%s:%s", config.ImageRepository, config.ResolvedCommitSha)
b.sendLog(config, fmt.Sprintf("Resolved image tag %s", config.ImageURI))
}

if checkCancelled() {
return fmt.Errorf("build cancelled")
}
Expand Down Expand Up @@ -165,9 +172,30 @@ func (b *Builder) clone(ctx context.Context, config *Config, buildDir string) er
}

b.sendLog(config, "Clone completed")
resolvedCommitSha, err := b.resolveCommitSha(ctx, config, buildDir)
if err != nil {
return err
}
config.ResolvedCommitSha = resolvedCommitSha
b.sendLog(config, fmt.Sprintf("Resolved commit %s", truncateStr(resolvedCommitSha, 8)))
return nil
}

// resolveCommitSha runs `git rev-parse HEAD` inside the cloned buildDir and
// returns the trimmed commit hash the checkout currently points at. The
// command output (via runCommand) is also streamed to the build log sender.
func (b *Builder) resolveCommitSha(ctx context.Context, config *Config, buildDir string) (string, error) {
	out, err := b.runCommand(exec.CommandContext(ctx, "git", "-C", buildDir, "rev-parse", "HEAD"), config)
	if err != nil {
		return "", fmt.Errorf("failed to resolve commit sha: %s: %w", out, err)
	}

	// git prints the hash followed by a newline; an empty result means the
	// repository state is unusable, so treat it as an error.
	sha := strings.TrimSpace(out)
	if sha == "" {
		return "", fmt.Errorf("resolved commit sha is empty")
	}

	return sha, nil
}

func (b *Builder) buildAndPush(ctx context.Context, config *Config, buildDir string) error {
contextDir := buildDir
if config.RootDir != "" {
Expand Down
6 changes: 5 additions & 1 deletion agent/internal/http/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -267,6 +267,7 @@ type BuildDetails struct {
ProjectID string `json:"projectId"`
} `json:"build"`
CloneURL string `json:"cloneUrl"`
ImageRepository string `json:"imageRepository"`
ImageURI string `json:"imageUri"`
RootDir string `json:"rootDir"`
Secrets map[string]string `json:"secrets"`
Expand Down Expand Up @@ -301,13 +302,16 @@ func (c *Client) GetBuild(buildID string) (*BuildDetails, error) {
return &result, nil
}

func (c *Client) UpdateBuildStatus(buildID, status, errorMsg string) error {
func (c *Client) UpdateBuildStatus(buildID, status, errorMsg, resolvedCommitSha string) error {
payload := map[string]string{
"status": status,
}
if errorMsg != "" {
payload["error"] = errorMsg
}
if resolvedCommitSha != "" {
payload["resolvedCommitSha"] = resolvedCommitSha
}

body, err := json.Marshal(payload)
if err != nil {
Expand Down
2 changes: 2 additions & 0 deletions docs/deployments/compose.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -32,3 +32,5 @@ Each service in the compose file becomes a separate Techulus Cloud service withi
## Stateful Services

If a service in the compose file defines volumes, it is automatically marked as stateful. Stateful services are limited to 1 replica and pinned to a single server.

Imported stateful services use single-server local storage. Techulus Cloud does not currently provide replicated volumes or automatic failover for these services. Avoid importing production databases unless you accept the single-node storage risk and have an external backup and recovery plan.
4 changes: 2 additions & 2 deletions docs/index.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -22,11 +22,11 @@ All communication between nodes happens over an encrypted **WireGuard mesh netwo
- **Build from source** — push code and build with Railpack or your own Dockerfile. Or deploy pre-built images.
- **GitHub auto-deploy** — connect a repo and deploy on every push.
- **Automatic HTTPS** — TLS certificates are provisioned and renewed automatically via Let's Encrypt.
- **Persistent volumes** — attach named volumes for stateful workloads with scheduled backups to S3-compatible storage.
- **Persistent volumes** — attach named local volumes for stateful workloads with scheduled backups to S3-compatible storage. Replicated storage and HA failover are not yet supported.
- **Service discovery** — containers resolve each other by name using `.internal` domains.
- **Multi-environment** — run production, staging, and dev within the same project.
- **GeoDNS** — route users to the nearest proxy node with automatic failover.
- **TCP/UDP proxy** — expose non-HTTP services like databases or game servers.
- **TCP/UDP proxy** — expose non-HTTP services like game servers or custom protocols.

## Next Steps

Expand Down
2 changes: 2 additions & 0 deletions docs/infrastructure/backups.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@ description: "Automated database backups to S3-compatible storage."

Techulus Cloud can automatically back up databases running in your containers to S3-compatible storage. Backups are triggered on a schedule or manually from the web UI.

> **Backups are not high availability:** They provide point-in-time disaster recovery only. If the server hosting a stateful service is lost, any writes after the last successful backup may be lost, and recovery requires restoring data before the service can run elsewhere. Backups do not provide replicated storage or automatic failover.

## Supported Databases

The agent detects the database type from the container image name and runs the appropriate dump command:
Expand Down
2 changes: 2 additions & 0 deletions docs/networking/service-discovery.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -30,3 +30,5 @@ If you have a `postgres` service and a `web` service, the web service can connec
```
postgres://user:pass@postgres.internal:5432/mydb
```

For production databases, prefer an external managed database or another HA database setup. Techulus Cloud stateful volumes are single-server local storage and do not currently provide replicated storage or automatic failover.
8 changes: 5 additions & 3 deletions docs/networking/tcp-udp-proxy.mdx
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
---
title: "TCP/UDP Proxy"
description: "Expose non-HTTP services like databases and game servers."
description: "Expose non-HTTP services like game servers and custom protocols."
---

Not every service speaks HTTP. Techulus Cloud supports exposing raw TCP and UDP ports through proxy nodes for services like databases, game servers, or custom protocols.
Not every service speaks HTTP. Techulus Cloud supports exposing raw TCP and UDP ports through proxy nodes for services like game servers or custom protocols.

> **Database exposure warning:** Database ports should usually remain private on the WireGuard network. Public database access increases security risk, and Techulus Cloud does not currently provide HA storage or automatic failover for production databases.

## Configuration

Expand All @@ -20,7 +22,7 @@ Traffic is routed from the proxy node's external port through the WireGuard mesh

## TLS Passthrough

For TCP services that handle their own TLS (e.g., a database with native SSL), enable **TLS passthrough**. This forwards the encrypted connection directly to the container without Traefik terminating TLS.
For TCP services that handle their own TLS, enable **TLS passthrough**. This forwards the encrypted connection directly to the container without Traefik terminating TLS.

## Firewall

Expand Down
5 changes: 4 additions & 1 deletion docs/services/scaling.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -17,12 +17,15 @@ When auto-placement is disabled, you manually configure how many replicas run on

## Server Pinning

Stateful services (those with [volumes](/services/volumes)) are automatically pinned to a single server. This ensures the container always has access to its persistent data.
Stateful services (those with [volumes](/services/volumes)) are automatically pinned to a single server. This ensures the container always mounts the same local data path and avoids accidentally starting on a server that does not have the volume.

Pinning does not provide high availability. Volume data is not replicated across servers, and if the pinned server is lost, the service must be recovered from completed backups.

You can also manually lock any service to a specific server by setting the locked server. This is useful for workloads that need to run on a particular machine.

## Limitations

- Stateful services are limited to 1 replica.
- Stateful services cannot use auto-placement — they are always pinned to their locked server.
- Stateful services do not automatically fail over to another server.
- Maximum 10 replicas per service.
8 changes: 6 additions & 2 deletions docs/services/volumes.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@ description: "Persistent storage for stateful services."

Volumes provide persistent storage that survives container restarts and redeployments.

> **Stateful storage warning:** Volumes are stored on a single server's local filesystem. Techulus Cloud does not currently provide replicated volumes, automatic storage failover, or high availability for stateful services. If the server hosting a volume is lost, data can only be recovered from completed backups. We do not recommend running production databases on Techulus Cloud until HA storage and failover are implemented, unless you accept this risk and maintain an external recovery plan.

## Adding Volumes

Each volume has a name and a container path:
Expand All @@ -14,7 +16,7 @@ Each volume has a name and a container path:
| Name | Unique identifier for the volume |
| Container path | Where the volume is mounted inside the container (e.g., `/var/lib/postgresql/data`) |

When you add a volume, the service automatically becomes **stateful**. Stateful services are locked to a single server and limited to 1 replica. When the last volume is removed, the service reverts to stateless.
When you add a volume, the service automatically becomes **stateful**. Stateful services are locked to a single server and limited to 1 replica so the container always mounts the same local data path. When the last volume is removed, the service reverts to stateless.

## Volume Backups

Expand Down Expand Up @@ -44,4 +46,6 @@ You can restore a volume from any completed backup. The restore process download

- Services with volumes are locked to a single server — they cannot be auto-placed across multiple nodes.
- Replica count is fixed at 1 for stateful services.
- Volume data lives on the host filesystem. If the server is lost, data is only recoverable from backups.
- Volume data lives on the host filesystem and is not replicated to other servers.
- If the server is lost, data is only recoverable from completed backups.
- Backups are point-in-time recovery, not high availability or automatic failover.
16 changes: 4 additions & 12 deletions web/actions/builds.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
"use server";

import { eq, desc } from "drizzle-orm";
import { eq } from "drizzle-orm";
import { db } from "@/db";
import { builds, githubRepos, services } from "@/db/schema";
import { inngest } from "@/lib/inngest/client";
Expand Down Expand Up @@ -93,22 +93,14 @@ export async function triggerBuild(
.where(eq(githubRepos.serviceId, serviceId));

if (githubRepo) {
const [latestBuild] = await db
.select()
.from(builds)
.where(eq(builds.serviceId, serviceId))
.orderBy(desc(builds.createdAt))
.limit(1);

await inngest.send(
inngestEvents.buildTrigger.create({
serviceId,
trigger,
githubRepoId: githubRepo.id,
commitSha: latestBuild?.commitSha || "HEAD",
commitMessage: latestBuild?.commitMessage || triggerMessage,
branch: latestBuild?.branch || githubRepo.deployBranch || "main",
author: latestBuild?.author ?? undefined,
commitSha: "HEAD",
commitMessage: triggerMessage,
branch: githubRepo.deployBranch || githubRepo.defaultBranch || "main",
}),
);

Expand Down
20 changes: 11 additions & 9 deletions web/app/api/v1/agent/builds/[id]/route.ts
Original file line number Diff line number Diff line change
@@ -1,20 +1,20 @@
import { NextRequest, NextResponse } from "next/server";
import { and, eq } from "drizzle-orm";
import { type NextRequest, NextResponse } from "next/server";
import { db } from "@/db";
import { getSetting } from "@/db/queries";
import {
builds,
githubRepos,
githubInstallations,
services,
githubRepos,
projects,
secrets,
services,
} from "@/db/schema";
import { eq, and } from "drizzle-orm";
import { verifyAgentRequest } from "@/lib/agent-auth";
import { getInstallationToken, buildCloneUrl } from "@/lib/github";
import { getSetting } from "@/db/queries";
import { buildCloneUrl, getInstallationToken } from "@/lib/github";
import {
SETTING_KEYS,
DEFAULT_BUILD_TIMEOUT_MINUTES,
SETTING_KEYS,
} from "@/lib/settings-keys";

export async function GET(
Expand Down Expand Up @@ -75,8 +75,9 @@ export async function GET(
{ status: 500 },
);
}
const imageRepository = `${registryHost}/${project.id}/${service.id}`;
const commitSha = build.commitSha === "HEAD" ? "latest" : build.commitSha;
const imageUri = `${registryHost}/${project.id}/${service.id}:${commitSha}`;
const imageUri = `${imageRepository}:${commitSha}`;

let cloneUrl: string;

Expand Down Expand Up @@ -131,7 +132,7 @@ export async function GET(
} else if (service.githubRepoUrl) {
cloneUrl = service.githubRepoUrl;
if (!cloneUrl.endsWith(".git")) {
cloneUrl = cloneUrl + ".git";
cloneUrl = `${cloneUrl}.git`;
}
} else {
return NextResponse.json(
Expand Down Expand Up @@ -168,6 +169,7 @@ export async function GET(
projectId: project.id,
},
cloneUrl,
imageRepository,
imageUri,
rootDir: service.githubRootDir || "",
secrets: secretsMap,
Expand Down
Loading
Loading