diff --git a/.github/workflows/harness-image.yml b/.github/workflows/harness-image.yml
new file mode 100644
index 00000000..474c4c68
--- /dev/null
+++ b/.github/workflows/harness-image.yml
@@ -0,0 +1,62 @@
+name: Harness Worker Image
+
+on:
+  push:
+    branches: [main, certification-worker]
+    paths:
+      - "Dockerfile"
+      - "harness/**"
+      - "src/**"
+      - "package.json"
+      - "package-lock.json"
+      - ".github/workflows/harness-image.yml"
+  release:
+    types: [published]
+  workflow_dispatch:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  build-and-push:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Log in to GHCR
+        uses: docker/login-action@v3
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Docker metadata
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          images: ghcr.io/conductor-oss/javascript-sdk/harness-worker
+          tags: |
+            # Branch-named tag for every branch push (main, certification-worker, ...)
+            type=ref,event=branch
+            # Only main pushes and releases may move the `latest` tag; feature
+            # branches previously clobbered it.
+            type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' || github.event_name == 'release' }}
+            type=raw,value=${{ github.event.release.tag_name }},enable=${{ github.event_name == 'release' }}
+
+      - name: Build and push
+        uses: docker/build-push-action@v6
+        with:
+          context: .
+          file: ./Dockerfile
+          target: harness
+          platforms: linux/amd64,linux/arm64
+          push: true
+          tags: ${{ steps.meta.outputs.tags }}
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..3c8c1e90
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,27 @@
+FROM node:24-alpine AS build
+WORKDIR /package
+COPY package*.json ./
+RUN npm ci
+COPY . .
+RUN npm run build + +FROM build AS harness-build +RUN npx tsup harness/main.ts \ + --outDir /app \ + --format cjs \ + --target node24 \ + --no-splitting + +FROM node:24-alpine AS harness-deps +WORKDIR /package +COPY package*.json ./ +RUN npm ci --omit=dev + +FROM node:24-alpine AS harness +RUN adduser -D -u 65532 nonroot +USER nonroot +WORKDIR /app +COPY --from=harness-deps /package/node_modules /app/node_modules +COPY --from=harness-deps /package/package.json /app/package.json +COPY --from=harness-build /app/main.js /app/main.js +ENTRYPOINT ["node", "main.js"] diff --git a/harness/README.md b/harness/README.md new file mode 100644 index 00000000..e97bc9d2 --- /dev/null +++ b/harness/README.md @@ -0,0 +1,78 @@ +# JS SDK Docker Harness + +Two Docker targets built from the root `Dockerfile`: an **SDK build** and a **long-running worker harness**. + +## Worker Harness + +A self-feeding worker that runs indefinitely. On startup it registers five simulated tasks (`js_worker_0` through `js_worker_4`) and the `js_simulated_tasks_workflow`, then runs two background services: + +- **WorkflowGovernor** -- starts a configurable number of `js_simulated_tasks_workflow` instances per second (default 2), indefinitely. +- **SimulatedTaskWorkers** -- five task handlers, each with a codename and a default sleep duration. Each worker supports configurable delay types, failure simulation, and output generation via task input parameters. The workflow chains them in sequence: quickpulse (1s) → whisperlink (2s) → shadowfetch (3s) → ironforge (4s) → deepcrawl (5s). + +### Building Locally + +```bash +docker build --target harness -t js-sdk-harness . 
+``` + +### Multiplatform Build and Push + +To build for both `linux/amd64` and `linux/arm64` and push to GHCR: + +```bash +# One-time: create a buildx builder if you don't have one +docker buildx create --name multiarch --use --bootstrap + +# Build and push +docker buildx build \ + --platform linux/amd64,linux/arm64 \ + --target harness \ + -t ghcr.io/conductor-oss/javascript-sdk/harness-worker:latest \ + --push . +``` + +> **Note:** Multi-platform builds require `docker buildx` and a builder that supports cross-compilation. On macOS this works out of the box with Docker Desktop. On Linux you may need to install QEMU user-space emulators: +> +> ```bash +> docker run --privileged --rm tonistiigi/binfmt --install all +> ``` + +### Running + +```bash +docker run -d \ + -e CONDUCTOR_SERVER_URL=https://your-cluster.example.com/api \ + -e CONDUCTOR_AUTH_KEY=$CONDUCTOR_AUTH_KEY \ + -e CONDUCTOR_AUTH_SECRET=$CONDUCTOR_AUTH_SECRET \ + -e HARNESS_WORKFLOWS_PER_SEC=4 \ + js-sdk-harness +``` + +You can also run the harness locally without Docker: + +```bash +export CONDUCTOR_SERVER_URL=https://your-cluster.example.com/api +export CONDUCTOR_AUTH_KEY=$CONDUCTOR_AUTH_KEY +export CONDUCTOR_AUTH_SECRET=$CONDUCTOR_AUTH_SECRET + +npx tsx harness/main.ts +``` + +Override defaults with environment variables as needed: + +```bash +HARNESS_WORKFLOWS_PER_SEC=4 HARNESS_BATCH_SIZE=10 npx tsx harness/main.ts +``` + +All resource names use a `js_` prefix so multiple SDK harnesses (C#, Python, Go, etc.) can coexist on the same cluster. 
+ +### Environment Variables + +| Variable | Required | Default | Description | +|---|---|---|---| +| `CONDUCTOR_SERVER_URL` | yes | -- | Conductor API base URL | +| `CONDUCTOR_AUTH_KEY` | no | -- | Orkes auth key | +| `CONDUCTOR_AUTH_SECRET` | no | -- | Orkes auth secret | +| `HARNESS_WORKFLOWS_PER_SEC` | no | 2 | Workflows to start per second | +| `HARNESS_BATCH_SIZE` | no | 20 | Number of tasks each worker polls per batch | +| `HARNESS_POLL_INTERVAL_MS` | no | 100 | Milliseconds between poll cycles | diff --git a/harness/main.ts b/harness/main.ts new file mode 100644 index 00000000..f3c077b8 --- /dev/null +++ b/harness/main.ts @@ -0,0 +1,122 @@ +import { + OrkesClients, + ConductorWorkflow, + TaskHandler, + simpleTask, +} from "../src/sdk"; +import { MetadataResource } from "../src/open-api/generated"; +import type { ConductorWorker } from "../src/sdk/clients/worker/types"; +import { SimulatedTaskWorker } from "./simulatedTaskWorker"; +import { WorkflowGovernor } from "./workflowGovernor"; + +const WORKFLOW_NAME = "js_simulated_tasks_workflow"; + +const SIMULATED_WORKERS: { + taskName: string; + codename: string; + sleepSeconds: number; +}[] = [ + { taskName: "js_worker_0", codename: "quickpulse", sleepSeconds: 1 }, + { taskName: "js_worker_1", codename: "whisperlink", sleepSeconds: 2 }, + { taskName: "js_worker_2", codename: "shadowfetch", sleepSeconds: 3 }, + { taskName: "js_worker_3", codename: "ironforge", sleepSeconds: 4 }, + { taskName: "js_worker_4", codename: "deepcrawl", sleepSeconds: 5 }, +]; + +function envIntOrDefault(key: string, defaultVal: number): number { + const s = process.env[key]; + if (!s) return defaultVal; + const v = parseInt(s, 10); + return isNaN(v) ? 
defaultVal : v; +} + +async function registerMetadata( + client: Awaited>, + workflowClient: ReturnType, +): Promise { + const taskDefs = SIMULATED_WORKERS.map((def) => ({ + name: def.taskName, + description: `JS SDK harness simulated task (${def.codename}, default delay ${def.sleepSeconds}s)`, + retryCount: 1, + timeoutSeconds: 300, + responseTimeoutSeconds: 300, + totalTimeoutSeconds: 0, + })); + + await MetadataResource.registerTaskDef({ + client, + body: taskDefs, + }); + + const wf = new ConductorWorkflow(workflowClient, WORKFLOW_NAME) + .version(1) + .description("JS SDK harness simulated task workflow") + .ownerEmail("js-sdk-harness@conductor.io"); + + for (const def of SIMULATED_WORKERS) { + wf.add(simpleTask(def.codename, def.taskName, {})); + } + + await wf.register(true); + + console.log( + `Registered workflow ${WORKFLOW_NAME} with ${SIMULATED_WORKERS.length} tasks`, + ); +} + +async function main(): Promise { + const clients = await OrkesClients.from(); + const workflowClient = clients.getWorkflowClient(); + const client = clients.getClient(); + + await registerMetadata(client, workflowClient); + + const workflowsPerSec = envIntOrDefault("HARNESS_WORKFLOWS_PER_SEC", 2); + const batchSize = envIntOrDefault("HARNESS_BATCH_SIZE", 20); + const pollIntervalMs = envIntOrDefault("HARNESS_POLL_INTERVAL_MS", 100); + + const workers: ConductorWorker[] = SIMULATED_WORKERS.map((def) => { + const sim = new SimulatedTaskWorker( + def.taskName, + def.codename, + def.sleepSeconds, + batchSize, + pollIntervalMs, + ); + return { + taskDefName: sim.taskName, + execute: sim.execute.bind(sim), + concurrency: sim.batchSize, + pollInterval: sim.pollInterval, + }; + }); + + const handler = new TaskHandler({ + client, + workers, + scanForDecorated: false, + }); + await handler.startWorkers(); + + const governor = new WorkflowGovernor( + workflowClient, + WORKFLOW_NAME, + workflowsPerSec, + ); + governor.start(); + + const shutdown = async () => { + console.log("Shutting 
down..."); + governor.stop(); + await handler.stopWorkers(); + process.exit(0); + }; + + process.on("SIGINT", shutdown); + process.on("SIGTERM", shutdown); +} + +main().catch((err) => { + console.error("Fatal error:", err); + process.exit(1); +}); diff --git a/harness/manifests/README.md b/harness/manifests/README.md new file mode 100644 index 00000000..5f745923 --- /dev/null +++ b/harness/manifests/README.md @@ -0,0 +1,132 @@ +# Kubernetes Manifests + +This directory contains Kubernetes manifests for deploying the JS SDK harness worker to the certification clusters. + +## Prerequisites + +**Set your namespace environment variable:** +```bash +export NS=your-namespace-here +``` + +All kubectl commands below use `-n $NS` to specify the namespace. The manifests intentionally do not include hardcoded namespaces. + +**Note:** The harness worker images are published as public packages on GHCR and do not require authentication to pull. No image pull secrets are needed. + +## Files + +| File | Description | +|---|---| +| `deployment.yaml` | Deployment (single file, works on all clusters) | +| `configmap-aws.yaml` | Conductor URL + auth key for certification-aws | +| `configmap-azure.yaml` | Conductor URL + auth key for certification-az | +| `configmap-gcp.yaml` | Conductor URL + auth key for certification-gcp | +| `secret-conductor.yaml` | Conductor auth secret (placeholder template) | + +## Quick Start + +### 1. Create the Conductor Auth Secret + +The `CONDUCTOR_AUTH_SECRET` must be created as a Kubernetes secret before deploying. + +```bash +kubectl create secret generic conductor-credentials \ + --from-literal=auth-secret=YOUR_AUTH_SECRET \ + -n $NS +``` + +If the `conductor-credentials` secret already exists in the namespace (e.g. from the e2e-testrunner-worker), it can be reused as-is. + +See `secret-conductor.yaml` for more details. + +### 2. 
Apply the ConfigMap for Your Cluster + +```bash +# AWS +kubectl apply -f manifests/configmap-aws.yaml -n $NS + +# Azure +kubectl apply -f manifests/configmap-azure.yaml -n $NS + +# GCP +kubectl apply -f manifests/configmap-gcp.yaml -n $NS +``` + +### 3. Deploy + +```bash +kubectl apply -f manifests/deployment.yaml -n $NS +``` + +### 4. Verify + +```bash +# Check pod status +kubectl get pods -n $NS -l app=js-sdk-harness-worker + +# Watch logs +kubectl logs -n $NS -l app=js-sdk-harness-worker -f +``` + +## Building and Pushing the Image + +From the repository root: + +```bash +# Build the harness target and push to GHCR +docker buildx build \ + --platform linux/amd64,linux/arm64 \ + --target harness \ + -t ghcr.io/conductor-oss/javascript-sdk/harness-worker:latest \ + --push . +``` + +After pushing a new image with the same tag, restart the deployment to pull it: + +```bash +kubectl rollout restart deployment/js-sdk-harness-worker -n $NS +kubectl rollout status deployment/js-sdk-harness-worker -n $NS +``` + +## Tuning + +The harness worker accepts these optional environment variables (set in `deployment.yaml`): + +| Variable | Default | Description | +|---|---|---| +| `HARNESS_WORKFLOWS_PER_SEC` | 2 | Workflows to start per second | +| `HARNESS_BATCH_SIZE` | 20 | Tasks each worker polls per batch | +| `HARNESS_POLL_INTERVAL_MS` | 100 | Milliseconds between poll cycles | + +Edit `deployment.yaml` to change these, then re-apply: + +```bash +kubectl apply -f manifests/deployment.yaml -n $NS +``` + +## Troubleshooting + +### Pod not starting + +```bash +kubectl describe pod -n $NS -l app=js-sdk-harness-worker +kubectl logs -n $NS -l app=js-sdk-harness-worker --tail=100 +``` + +### Secret not found + +```bash +kubectl get secret conductor-credentials -n $NS +``` + +## Resource Limits + +Default resource allocation: +- **Memory**: 256Mi (request) / 512Mi (limit) +- **CPU**: 100m (request) / 500m (limit) + +Adjust in `deployment.yaml` based on workload. 
Higher `HARNESS_WORKFLOWS_PER_SEC` values may need more CPU/memory. + +## Service + +The harness worker does **not** need a Service or Ingress. It connects to Conductor via outbound HTTP polling. All communication is outbound. diff --git a/harness/manifests/configmap-aws.yaml b/harness/manifests/configmap-aws.yaml new file mode 100644 index 00000000..40dd2f83 --- /dev/null +++ b/harness/manifests/configmap-aws.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: js-sdk-harness-config + labels: + app: js-sdk-harness-worker +data: + CONDUCTOR_SERVER_URL: "https://certification-aws.orkesconductor.io/api" + CONDUCTOR_AUTH_KEY: "7ba9d0ec-247b-11f1-8d42-ea3efeda41b2" diff --git a/harness/manifests/configmap-azure.yaml b/harness/manifests/configmap-azure.yaml new file mode 100644 index 00000000..0290123c --- /dev/null +++ b/harness/manifests/configmap-azure.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: js-sdk-harness-config + labels: + app: js-sdk-harness-worker +data: + CONDUCTOR_SERVER_URL: "https://certification-az.orkesconductor.io/api" + CONDUCTOR_AUTH_KEY: "bf170d61-2797-11f1-833e-4ae04d100a03" diff --git a/harness/manifests/configmap-gcp.yaml b/harness/manifests/configmap-gcp.yaml new file mode 100644 index 00000000..c2da884f --- /dev/null +++ b/harness/manifests/configmap-gcp.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: js-sdk-harness-config + labels: + app: js-sdk-harness-worker +data: + CONDUCTOR_SERVER_URL: "https://certification-gcp.orkesconductor.com/api" + CONDUCTOR_AUTH_KEY: "e6c1ac61-286b-11f1-be01-c682b5750c3a" diff --git a/harness/manifests/deployment.yaml b/harness/manifests/deployment.yaml new file mode 100644 index 00000000..e93078ad --- /dev/null +++ b/harness/manifests/deployment.yaml @@ -0,0 +1,60 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: js-sdk-harness-worker + labels: + app: js-sdk-harness-worker +spec: + replicas: 2 + selector: + 
matchLabels: + app: js-sdk-harness-worker + template: + metadata: + labels: + app: js-sdk-harness-worker + spec: + containers: + - name: harness + image: ghcr.io/conductor-oss/javascript-sdk/harness-worker:latest + imagePullPolicy: Always + env: + - name: CONDUCTOR_SERVER_URL + valueFrom: + configMapKeyRef: + name: js-sdk-harness-config + key: CONDUCTOR_SERVER_URL + - name: CONDUCTOR_AUTH_KEY + valueFrom: + configMapKeyRef: + name: js-sdk-harness-config + key: CONDUCTOR_AUTH_KEY + - name: CONDUCTOR_AUTH_SECRET + valueFrom: + secretKeyRef: + name: conductor-credentials + key: auth-secret + - name: HARNESS_WORKFLOWS_PER_SEC + value: "1" + - name: HARNESS_BATCH_SIZE + value: "20" + - name: HARNESS_POLL_INTERVAL_MS + value: "100" + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" + livenessProbe: + exec: + command: ["/bin/sh", "-c", "test -e /proc/1/cmdline"] + initialDelaySeconds: 30 + periodSeconds: 30 + readinessProbe: + exec: + command: ["/bin/sh", "-c", "test -e /proc/1/cmdline"] + initialDelaySeconds: 10 + periodSeconds: 10 + restartPolicy: Always diff --git a/harness/manifests/secret-conductor.yaml b/harness/manifests/secret-conductor.yaml new file mode 100644 index 00000000..c495f23a --- /dev/null +++ b/harness/manifests/secret-conductor.yaml @@ -0,0 +1,37 @@ +# Conductor API Secret +# This secret contains the Conductor AUTH SECRET for authentication. +# The auth key (key ID) is NOT secret and lives in the per-cloud ConfigMap. +# Create this secret before deploying the worker. 
+ +apiVersion: v1 +kind: Secret +metadata: + name: conductor-credentials + # namespace: xxxxx # supply this in kubectl command + labels: + app: js-sdk-harness-worker +type: Opaque +stringData: + # TODO: Replace with your actual Conductor AUTH SECRET (not the key ID) + auth-secret: "YOUR_CONDUCTOR_AUTH_SECRET_HERE" + +--- +# Instructions for creating this secret: +# +# Option 1 - kubectl imperative command (recommended): +# +# kubectl create secret generic conductor-credentials \ +# --from-literal=auth-secret=YOUR_AUTH_SECRET \ +# -n $NS +# +# Option 2 - edit this file and apply: +# +# 1. Replace YOUR_CONDUCTOR_AUTH_SECRET_HERE with the real secret value +# 2. kubectl apply -f manifests/secret-conductor.yaml -n $NS +# +# Note: The auth key (key ID) is not secret and is stored in the per-cloud +# ConfigMap (configmap-aws.yaml, configmap-azure.yaml, configmap-gcp.yaml). +# +# Note: If the e2e-testrunner-worker already runs in the same namespace, the +# conductor-credentials secret may already exist and can be reused as-is +# (same credential, same secret name). diff --git a/harness/simulatedTaskWorker.ts b/harness/simulatedTaskWorker.ts new file mode 100644 index 00000000..e8222321 --- /dev/null +++ b/harness/simulatedTaskWorker.ts @@ -0,0 +1,336 @@ +import crypto from "crypto"; +import type { Task, TaskResult } from "../src/open-api"; + +const ALPHANUMERIC_CHARS = + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"; + +const instanceId: string = + process.env.HOSTNAME ?? crypto.randomBytes(4).toString("hex"); + +function sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); +} + +// ── Type-safe input helpers ────────────────────────────────────── + +function toInt(v: unknown): number { + if (typeof v === "number") return Math.trunc(v); + if (typeof v === "string") { + const n = parseInt(v, 10); + return isNaN(n) ? 
0 : n; + } + return 0; +} + +function toFloat(v: unknown): number { + if (typeof v === "number") return v; + if (typeof v === "string") { + const n = parseFloat(v); + return isNaN(n) ? 0 : n; + } + return 0; +} + +function toBool(v: unknown): { value: boolean; ok: boolean } { + if (typeof v === "boolean") return { value: v, ok: true }; + if (typeof v === "string") { + const lower = v.toLowerCase(); + if (lower === "true" || lower === "1") return { value: true, ok: true }; + if (lower === "false" || lower === "0") return { value: false, ok: true }; + } + if (typeof v === "number") return { value: v !== 0, ok: true }; + return { value: false, ok: false }; +} + +function getIntOrDefault( + input: Record, + key: string, + defaultVal: number, +): number { + const v = input[key]; + if (v == null) return defaultVal; + return toInt(v); +} + +function getFloatOrDefault( + input: Record, + key: string, + defaultVal: number, +): number { + const v = input[key]; + if (v == null) return defaultVal; + return toFloat(v); +} + +function getStringOrDefault( + input: Record, + key: string, + defaultVal: string, +): string { + const v = input[key]; + if (v == null) return defaultVal; + if (typeof v === "string") return v; + return defaultVal; +} + +function getBoolOrDefault( + input: Record, + key: string, + defaultVal: boolean, +): boolean { + const v = input[key]; + if (v == null) return defaultVal; + const result = toBool(v); + return result.ok ? 
result.value : defaultVal; +} + +function generateRandomData(size: number): string { + if (size <= 0) return ""; + const buf = Buffer.alloc(size); + for (let i = 0; i < size; i++) { + buf[i] = ALPHANUMERIC_CHARS.charCodeAt( + Math.floor(Math.random() * ALPHANUMERIC_CHARS.length), + ); + } + return buf.toString("ascii"); +} + +// ── SimulatedTaskWorker ────────────────────────────────────────── + +export class SimulatedTaskWorker { + readonly taskName: string; + readonly codename: string; + readonly defaultDelayMs: number; + readonly batchSize: number; + readonly pollInterval: number; + readonly workerId: string; + + constructor( + taskName: string, + codename: string, + sleepSeconds: number, + batchSize = 5, + pollIntervalMs = 1000, + ) { + this.taskName = taskName; + this.codename = codename; + this.defaultDelayMs = sleepSeconds * 1000; + this.batchSize = batchSize; + this.pollInterval = pollIntervalMs; + this.workerId = `${taskName}-${instanceId}`; + + console.log( + `[${this.taskName}] Initialized worker [workerId=${this.workerId}, codename=${this.codename}, batchSize=${this.batchSize}, pollInterval=${pollIntervalMs}ms]`, + ); + } + + async execute( + task: Task, + ): Promise> { + const input = (task.inputData as Record) ?? {}; + const taskId = task.taskId ?? 
""; + const taskIndex = getIntOrDefault(input, "taskIndex", -1); + + console.log( + `[${this.taskName}] Starting simulated task [id=${taskId}, index=${taskIndex}, codename=${this.codename}]`, + ); + + const startTime = Date.now(); + + const delayType = getStringOrDefault(input, "delayType", "fixed"); + const minDelay = getIntOrDefault(input, "minDelay", this.defaultDelayMs); + const maxDelay = getIntOrDefault(input, "maxDelay", minDelay + 100); + const meanDelay = getIntOrDefault( + input, + "meanDelay", + Math.trunc((minDelay + maxDelay) / 2), + ); + const stdDeviation = getIntOrDefault(input, "stdDeviation", 30); + const successRate = getFloatOrDefault(input, "successRate", 1.0); + const failureMode = getStringOrDefault(input, "failureMode", "random"); + const outputSize = getIntOrDefault(input, "outputSize", 1024); + + let delayMs = 0; + if (delayType.toLowerCase() !== "wait") { + delayMs = this.calculateDelay( + delayType, + minDelay, + maxDelay, + meanDelay, + stdDeviation, + ); + + console.log( + `[${this.taskName}] Simulated task [id=${taskId}, index=${taskIndex}] sleeping for ${delayMs} ms`, + ); + await sleep(delayMs); + } + + if (!this.shouldTaskSucceed(successRate, failureMode, input)) { + console.log( + `[${this.taskName}] Simulated task [id=${taskId}, index=${taskIndex}] failed as configured`, + ); + return { + status: "FAILED", + outputData: { + error: "Simulated task failure based on configuration", + }, + }; + } + + const elapsed = Date.now() - startTime; + const output = this.generateOutput( + input, + taskId, + taskIndex, + delayMs, + elapsed, + outputSize, + ); + + return { status: "COMPLETED", outputData: output }; + } + + // ── Delay calculation ────────────────────────────────────────── + + private calculateDelay( + delayType: string, + minDelay: number, + maxDelay: number, + meanDelay: number, + stdDeviation: number, + ): number { + switch (delayType.toLowerCase()) { + case "fixed": + return minDelay; + + case "random": { + const spread = 
Math.max(1, maxDelay - minDelay + 1); + return minDelay + Math.floor(Math.random() * spread); + } + + case "normal": { + const gaussian = this.nextGaussian(); + const delay = Math.round(meanDelay + gaussian * stdDeviation); + return Math.max(1, delay); + } + + case "exponential": { + const exp = -meanDelay * Math.log(1 - Math.random()); + return Math.max(minDelay, Math.min(maxDelay, Math.trunc(exp))); + } + + default: + return minDelay; + } + } + + /** Box-Muller transform */ + private nextGaussian(): number { + const u1 = 1.0 - Math.random(); + const u2 = Math.random(); + return Math.sqrt(-2.0 * Math.log(u1)) * Math.sin(2.0 * Math.PI * u2); + } + + // ── Failure simulation ───────────────────────────────────────── + + private shouldTaskSucceed( + successRate: number, + failureMode: string, + input: Record, + ): boolean { + if (input.forceSuccess != null) { + const result = toBool(input.forceSuccess); + if (result.ok) return result.value; + } + if (input.forceFail != null) { + const result = toBool(input.forceFail); + if (result.ok) return !result.value; + } + + switch (failureMode.toLowerCase()) { + case "random": + return Math.random() < successRate; + + case "conditional": + return this.shouldConditionalSucceed(successRate, input); + + case "sequential": { + const attempt = getIntOrDefault(input, "attempt", 1); + const failUntilAttempt = getIntOrDefault( + input, + "failUntilAttempt", + 2, + ); + return attempt >= failUntilAttempt; + } + + default: + return Math.random() < successRate; + } + } + + private shouldConditionalSucceed( + successRate: number, + input: Record, + ): boolean { + const taskIndex = getIntOrDefault(input, "taskIndex", -1); + if (taskIndex >= 0) { + if (Array.isArray(input.failIndexes)) { + for (const idx of input.failIndexes) { + if (toInt(idx) === taskIndex) return false; + } + } + const failEvery = getIntOrDefault(input, "failEvery", 0); + if (failEvery > 0 && taskIndex % failEvery === 0) return false; + } + return Math.random() < 
successRate; + } + + // ── Output generation ────────────────────────────────────────── + + private generateOutput( + input: Record, + taskId: string, + taskIndex: number, + delayMs: number, + elapsedTimeMs: number, + outputSize: number, + ): Record { + const output: Record = { + taskId, + taskIndex, + codename: this.codename, + status: "completed", + configuredDelayMs: delayMs, + actualExecutionTimeMs: elapsedTimeMs, + a_or_b: Math.floor(Math.random() * 100) > 20 ? "a" : "b", + c_or_d: Math.floor(Math.random() * 100) > 33 ? "c" : "d", + }; + + if (getBoolOrDefault(input, "includeInput", false)) { + output.input = input; + } + + if (input.previousTaskOutput != null) { + output.previousTaskData = input.previousTaskOutput; + } + + if (outputSize > 0) { + output.data = generateRandomData(outputSize); + } + + if ( + input.outputTemplate != null && + typeof input.outputTemplate === "object" && + !Array.isArray(input.outputTemplate) + ) { + const template = input.outputTemplate as Record; + for (const [k, v] of Object.entries(template)) { + output[k] = v; + } + } + + return output; + } +} diff --git a/harness/workflowGovernor.ts b/harness/workflowGovernor.ts new file mode 100644 index 00000000..9865f126 --- /dev/null +++ b/harness/workflowGovernor.ts @@ -0,0 +1,65 @@ +import type { WorkflowExecutor } from "../src/sdk/clients/workflow/WorkflowExecutor"; + +export class WorkflowGovernor { + private readonly workflowExecutor: WorkflowExecutor; + private readonly workflowName: string; + private readonly workflowsPerSecond: number; + private timer: ReturnType | undefined; + + constructor( + workflowExecutor: WorkflowExecutor, + workflowName: string, + workflowsPerSecond: number, + ) { + this.workflowExecutor = workflowExecutor; + this.workflowName = workflowName; + this.workflowsPerSecond = workflowsPerSecond; + } + + start(): void { + console.log( + `WorkflowGovernor started: workflow=${this.workflowName}, rate=${this.workflowsPerSecond}/sec`, + ); + + this.timer = 
setInterval(() => { + this.startBatch(); + }, 1000); + + // Don't prevent process exit + if (this.timer && typeof this.timer === "object" && "unref" in this.timer) { + this.timer.unref(); + } + } + + stop(): void { + if (this.timer) { + clearInterval(this.timer); + this.timer = undefined; + } + console.log("WorkflowGovernor stopped"); + } + + private startBatch(): void { + const promises: Promise[] = []; + for (let i = 0; i < this.workflowsPerSecond; i++) { + promises.push( + this.workflowExecutor.startWorkflow({ + name: this.workflowName, + version: 1, + }), + ); + } + + Promise.all(promises) + .then(() => { + console.log( + `Governor: started ${this.workflowsPerSecond} workflow(s)`, + ); + }) + .catch((err: unknown) => { + console.error( + `Governor: error starting workflows: ${err instanceof Error ? err.message : String(err)}`, + ); + }); + } +} diff --git a/package-lock.json b/package-lock.json index 4fc3a619..a7c132d5 100644 --- a/package-lock.json +++ b/package-lock.json @@ -14,7 +14,7 @@ "devDependencies": { "@eslint/js": "^9.34.0", "@hey-api/openapi-ts": "^0.85.2", - "@tsconfig/node18": "^18.2.4", + "@tsconfig/node24": "^24.0.4", "@types/node": "^22.0.0", "@types/uuid": "^9.0.1", "cross-env": "^10.1.0", @@ -2314,10 +2314,10 @@ "@sinonjs/commons": "^3.0.1" } }, - "node_modules/@tsconfig/node18": { - "version": "18.2.4", - "resolved": "https://registry.npmjs.org/@tsconfig/node18/-/node18-18.2.4.tgz", - "integrity": "sha512-5xxU8vVs9/FNcvm3gE07fPbn9tl6tqGGWA9tSlwsUEkBxtRnTsNmwrV8gasZ9F/EobaSv9+nu8AxUKccw77JpQ==", + "node_modules/@tsconfig/node24": { + "version": "24.0.4", + "resolved": "https://registry.npmjs.org/@tsconfig/node24/-/node24-24.0.4.tgz", + "integrity": "sha512-2A933l5P5oCbv6qSxHs7ckKwobs8BDAe9SJ/Xr2Hy+nDlwmLE1GhFh/g/vXGRZWgxBg9nX/5piDtHR9Dkw/XuA==", "dev": true, "license": "MIT" }, diff --git a/package.json b/package.json index 615240aa..b125bd90 100644 --- a/package.json +++ b/package.json @@ -62,7 +62,7 @@ "devDependencies": { "@eslint/js": 
"^9.34.0", "@hey-api/openapi-ts": "^0.85.2", - "@tsconfig/node18": "^18.2.4", + "@tsconfig/node24": "^24.0.4", "@types/node": "^22.0.0", "@types/uuid": "^9.0.1", "cross-env": "^10.1.0", @@ -82,7 +82,7 @@ "undici": "^7.16.0" }, "tsup": { - "target": "node18", + "target": "node24", "sourcemap": true, "format": [ "esm", @@ -90,7 +90,10 @@ ], "dts": true, "clean": true, - "splitting": false + "splitting": false, + "external": [ + "undici" + ] }, "jest-junit": { "outputDirectory": "reports", diff --git a/tsconfig.json b/tsconfig.json index 03434af5..1bd47664 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -1,5 +1,5 @@ { - "extends": "@tsconfig/node18/tsconfig.json", + "extends": "@tsconfig/node24/tsconfig.json", "compilerOptions": { "strict": true, "lib": [