diff --git a/cmd/obol/openclaw.go b/cmd/obol/openclaw.go index 80d4ec6..0434235 100644 --- a/cmd/obol/openclaw.go +++ b/cmd/obol/openclaw.go @@ -132,6 +132,90 @@ func openclawCommand(cfg *config.Config) *cli.Command { Name: "skills", Usage: "Manage OpenClaw skills", Subcommands: []*cli.Command{ + { + Name: "list", + Usage: "List skills loaded in an OpenClaw instance", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.BoolFlag{Name: "json", Usage: "Output as JSON"}, + &cli.BoolFlag{Name: "eligible", Usage: "Show only eligible (ready-to-use) skills"}, + &cli.BoolFlag{Name: "verbose", Aliases: []string{"v"}, Usage: "Show details including requirements"}, + }, + Action: func(c *cli.Context) error { + if c.NArg() == 0 { + return fmt.Errorf("instance ID required (e.g., obol openclaw skills list default)") + } + var args []string + if c.Bool("json") { + args = append(args, "--json") + } + if c.Bool("eligible") { + args = append(args, "--eligible") + } + if c.Bool("verbose") { + args = append(args, "-v") + } + return openclaw.SkillsCLI(cfg, c.Args().First(), append([]string{"list"}, args...)) + }, + }, + { + Name: "info", + Usage: "Show detailed information about a skill", + ArgsUsage: " ", + Flags: []cli.Flag{ + &cli.BoolFlag{Name: "json", Usage: "Output as JSON"}, + }, + Action: func(c *cli.Context) error { + if c.NArg() < 2 { + return fmt.Errorf("instance ID and skill name required (e.g., obol openclaw skills info default github)") + } + args := []string{"info", c.Args().Get(1)} + if c.Bool("json") { + args = append(args, "--json") + } + return openclaw.SkillsCLI(cfg, c.Args().First(), args) + }, + }, + { + Name: "check", + Usage: "Check which skills are ready vs missing requirements", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.BoolFlag{Name: "json", Usage: "Output as JSON"}, + }, + Action: func(c *cli.Context) error { + if c.NArg() == 0 { + return fmt.Errorf("instance ID required (e.g., obol openclaw skills check default)") + } + args := []string{"check"} + if 
c.Bool("json") { + args = append(args, "--json") + } + return openclaw.SkillsCLI(cfg, c.Args().First(), args) + }, + }, + { + Name: "add", + Usage: "Install a skill from the clawhub registry and sync to pod", + ArgsUsage: " ", + Action: func(c *cli.Context) error { + if c.NArg() < 2 { + return fmt.Errorf("instance ID and skill slug required (e.g., obol openclaw skills add default austintgriffith/ethereum-wingman)") + } + return openclaw.SkillsAdd(cfg, c.Args().First(), c.Args().Get(1)) + }, + }, + { + Name: "remove", + Usage: "Remove a skill from the managed directory and re-sync", + ArgsUsage: " ", + Action: func(c *cli.Context) error { + if c.NArg() < 2 { + return fmt.Errorf("instance ID and skill name required (e.g., obol openclaw skills remove default ethereum-wingman)") + } + return openclaw.SkillsRemove(cfg, c.Args().First(), c.Args().Get(1)) + }, + }, { Name: "sync", Usage: "Package a local skills directory into a ConfigMap", diff --git a/internal/openclaw/openclaw.go b/internal/openclaw/openclaw.go index daf87c2..aadeae6 100644 --- a/internal/openclaw/openclaw.go +++ b/internal/openclaw/openclaw.go @@ -887,7 +887,7 @@ func SkillsSync(cfg *config.Config, id, skillsDir string) error { return fmt.Errorf("skills directory not found: %s", skillsDir) } - configMapName := fmt.Sprintf("openclaw-%s-skills", id) + configMapName := "openclaw-skills" archiveKey := "skills.tgz" fmt.Printf("Packaging skills from %s...\n", skillsDir) @@ -932,7 +932,26 @@ func SkillsSync(cfg *config.Config, id, skillsDir string) error { } fmt.Printf("✓ Skills ConfigMap updated: %s\n", configMapName) - fmt.Printf("\nTo apply, re-sync: obol openclaw sync %s\n", id) + + // Restart the deployment so the init container re-extracts skills + fmt.Printf("Restarting deployment to load new skills...\n") + restartCmd := exec.Command(kubectlBinary, "rollout", "restart", + "deployment/openclaw", "-n", namespace) + restartCmd.Env = append(os.Environ(), fmt.Sprintf("KUBECONFIG=%s", kubeconfigPath)) + if 
err := restartCmd.Run(); err != nil { + return fmt.Errorf("failed to restart deployment: %w", err) + } + + // Wait for rollout to complete + waitCmd := exec.Command(kubectlBinary, "rollout", "status", + "deployment/openclaw", "-n", namespace, "--timeout=60s") + waitCmd.Env = append(os.Environ(), fmt.Sprintf("KUBECONFIG=%s", kubeconfigPath)) + waitCmd.Stdout = os.Stdout + if err := waitCmd.Run(); err != nil { + return fmt.Errorf("rollout did not complete: %w", err) + } + + fmt.Printf("✓ Skills loaded in instance %s\n", id) return nil } diff --git a/internal/openclaw/overlay_test.go b/internal/openclaw/overlay_test.go index 638988f..e3bbb49 100644 --- a/internal/openclaw/overlay_test.go +++ b/internal/openclaw/overlay_test.go @@ -289,7 +289,7 @@ func TestRemoteCapableCommands(t *testing.T) { } // Commands that should go through kubectl exec - local := []string{"agent", "doctor", "config", "models", "message"} + local := []string{"agent", "doctor", "config", "models", "message", "skills"} for _, cmd := range local { if remoteCapableCommands[cmd] { t.Errorf("%q should NOT be remote-capable", cmd) diff --git a/internal/openclaw/skills.go b/internal/openclaw/skills.go new file mode 100644 index 0000000..6bdd5c5 --- /dev/null +++ b/internal/openclaw/skills.go @@ -0,0 +1,189 @@ +package openclaw + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + + "github.com/ObolNetwork/obol-stack/internal/config" +) + +// Skills management for OpenClaw instances. +// +// OpenClaw has built-in skill support (list, info, check) but the obol CLI +// only exposed "skills sync". This file fills that gap by: +// +// - Surfacing the in-pod openclaw CLI commands (list, info, check) via +// kubectl exec passthrough — zero reimplementation. +// - Adding host-side skill management (add, remove) that uses clawhub +// to install skills from the registry, then syncs to the pod. +// +// Skill hierarchy (OpenClaw loading precedence, later overrides earlier): +// +// 1. 
openclaw-bundled — built into the container image +// 2. openclaw-managed — ~/.config/obol/skills/ (inside container) +// 3. agents-skills — .agents/skills/ (project-local) +// 4. openclaw-workspace — skills/ (workspace, from ConfigMap mount) +// +// The managed skills directory (/skills/) on the host is +// synced to the pod's ConfigMap. Skills installed via "add" or pushed +// via "sync --from" end up in the workspace tier, which has highest +// precedence and cleanly overrides bundled defaults. + +// SkillsCLI delegates a skills subcommand to the in-pod openclaw binary +// via kubectl exec. OpenClaw's built-in "skills list|info|check" commands +// are exposed without reimplementing any logic. +func SkillsCLI(cfg *config.Config, id string, args []string) error { + return CLI(cfg, id, append([]string{"skills"}, args...)) +} + +// SkillsDir returns the managed skills directory for an instance. +// Skills installed via SkillsAdd are stored here and synced to the pod. +func SkillsDir(cfg *config.Config, id string) string { + return filepath.Join(deploymentPath(cfg, id), "skills") +} + +// findClawHub locates the clawhub CLI. Returns the binary path and any +// prefix args (e.g., ["clawhub"] when using npx as the runner). +func findClawHub() (binary string, prefixArgs []string, err error) { + if p, lookErr := exec.LookPath("clawhub"); lookErr == nil { + return p, nil, nil + } + if p, lookErr := exec.LookPath("npx"); lookErr == nil { + return p, []string{"clawhub"}, nil + } + return "", nil, fmt.Errorf("clawhub not found.\n\nInstall with:\n npm install -g clawhub\n\nOr ensure npx is available (Node.js 18+)") +} + +// SkillsAdd installs a skill from the clawhub registry into the +// instance's managed skills directory, then syncs to the pod. 
+// +// Usage: +// +// obol openclaw skills add +// obol openclaw skills add default austintgriffith/ethereum-wingman +func SkillsAdd(cfg *config.Config, id, slug string) error { + dir := deploymentPath(cfg, id) + if _, err := os.Stat(dir); os.IsNotExist(err) { + return fmt.Errorf("deployment not found: %s/%s\nRun 'obol openclaw onboard' first", appName, id) + } + + skillsDir := SkillsDir(cfg, id) + if err := os.MkdirAll(skillsDir, 0755); err != nil { + return fmt.Errorf("failed to create skills directory: %w", err) + } + + binary, prefixArgs, err := findClawHub() + if err != nil { + return err + } + + fmt.Printf("Installing skill %s...\n", slug) + args := append(prefixArgs, "install", slug, "--dir", skillsDir) + cmd := exec.Command(binary, args...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("clawhub install failed: %w", err) + } + + fmt.Printf("\nSyncing skills to instance %s...\n", id) + return SkillsSync(cfg, id, skillsDir) +} + +// SkillsRemove removes a skill from the managed directory and re-syncs +// remaining skills to the pod. 
+func SkillsRemove(cfg *config.Config, id, name string) error { + dir := deploymentPath(cfg, id) + if _, err := os.Stat(dir); os.IsNotExist(err) { + return fmt.Errorf("deployment not found: %s/%s\nRun 'obol openclaw onboard' first", appName, id) + } + + skillsDir := SkillsDir(cfg, id) + skillPath := filepath.Join(skillsDir, name) + + if _, err := os.Stat(skillPath); os.IsNotExist(err) { + return fmt.Errorf("skill %q not found in %s", name, skillsDir) + } + + if err := os.RemoveAll(skillPath); err != nil { + return fmt.Errorf("failed to remove skill: %w", err) + } + fmt.Printf("Removed skill %s\n", name) + + // Re-sync remaining skills to pod, or reset if none left + entries, _ := os.ReadDir(skillsDir) + if len(entries) == 0 { + fmt.Println("No managed skills remaining; resetting ConfigMap...") + return skillsReset(cfg, id) + } + + fmt.Printf("Syncing skills to instance %s...\n", id) + return SkillsSync(cfg, id, skillsDir) +} + +// skillsReset creates a ConfigMap with an empty tar archive and restarts +// the deployment. The init container sees a valid skills.tgz, clears the +// old extracted skills directory, and extracts nothing — effectively +// removing all managed skills from the pod. +func skillsReset(cfg *config.Config, id string) error { + namespace := fmt.Sprintf("%s-%s", appName, id) + kubeconfigPath := filepath.Join(cfg.ConfigDir, "kubeconfig.yaml") + kubectlBinary := filepath.Join(cfg.BinDir, "kubectl") + configMapName := "openclaw-skills" + + // Create an empty tar.gz archive so the init container's "rm -rf + extract" + // path runs and cleans the directory (an empty ConfigMap would just skip). 
+ emptyTgz, err := os.CreateTemp("", "openclaw-empty-skills-*.tgz") + if err != nil { + return fmt.Errorf("failed to create temp file: %w", err) + } + defer os.Remove(emptyTgz.Name()) + + emptyDir, err := os.MkdirTemp("", "openclaw-empty-skills-dir-*") + if err != nil { + return fmt.Errorf("failed to create temp dir: %w", err) + } + defer os.RemoveAll(emptyDir) + + tarCmd := exec.Command("tar", "-czf", emptyTgz.Name(), "-C", emptyDir, ".") + if err := tarCmd.Run(); err != nil { + return fmt.Errorf("failed to create empty archive: %w", err) + } + emptyTgz.Close() + + kubeEnv := append(os.Environ(), fmt.Sprintf("KUBECONFIG=%s", kubeconfigPath)) + + delCmd := exec.Command(kubectlBinary, "delete", "configmap", configMapName, + "-n", namespace, "--ignore-not-found") + delCmd.Env = kubeEnv + delCmd.Run() + + createCmd := exec.Command(kubectlBinary, "create", "configmap", configMapName, + "-n", namespace, + fmt.Sprintf("--from-file=skills.tgz=%s", emptyTgz.Name())) + createCmd.Env = kubeEnv + if err := createCmd.Run(); err != nil { + return fmt.Errorf("failed to reset ConfigMap: %w", err) + } + + restartCmd := exec.Command(kubectlBinary, "rollout", "restart", + "deployment/openclaw", "-n", namespace) + restartCmd.Env = kubeEnv + if err := restartCmd.Run(); err != nil { + return fmt.Errorf("failed to restart deployment: %w", err) + } + + waitCmd := exec.Command(kubectlBinary, "rollout", "status", + "deployment/openclaw", "-n", namespace, "--timeout=60s") + waitCmd.Env = kubeEnv + waitCmd.Stdout = os.Stdout + if err := waitCmd.Run(); err != nil { + return fmt.Errorf("rollout did not complete: %w", err) + } + + fmt.Printf("✓ Managed skills cleared from instance %s\n", id) + return nil +} diff --git a/internal/openclaw/skills_test.go b/internal/openclaw/skills_test.go new file mode 100644 index 0000000..34f1b46 --- /dev/null +++ b/internal/openclaw/skills_test.go @@ -0,0 +1,231 @@ +package openclaw + +import ( + "os" + "path/filepath" + "strings" + "testing" + + 
"github.com/ObolNetwork/obol-stack/internal/config" +) + +func TestSkillsDir(t *testing.T) { + tests := []struct { + name string + configDir string + id string + wantSuffix string + }{ + { + name: "default instance", + configDir: "/home/user/.config/obol", + id: "default", + wantSuffix: "applications/openclaw/default/skills", + }, + { + name: "petname instance", + configDir: "/home/user/.config/obol", + id: "happy-otter", + wantSuffix: "applications/openclaw/happy-otter/skills", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := &config.Config{ConfigDir: tt.configDir} + got := SkillsDir(cfg, tt.id) + if !strings.HasSuffix(got, tt.wantSuffix) { + t.Errorf("SkillsDir() = %q, want suffix %q", got, tt.wantSuffix) + } + }) + } +} + +func TestSkillsNotRemoteCapable(t *testing.T) { + // "skills" must NOT be in remoteCapableCommands. + // This ensures SkillsCLI routes through kubectl exec (into the pod), + // not via port-forward (which requires --url/--token support). 
+ if remoteCapableCommands["skills"] { + t.Error("'skills' should NOT be remote-capable; must route through kubectl exec") + } +} + +func TestSkillsRemove_MissingDeployment(t *testing.T) { + tmpDir := t.TempDir() + cfg := &config.Config{ConfigDir: tmpDir} + + err := SkillsRemove(cfg, "nonexistent", "some-skill") + if err == nil { + t.Fatal("expected error for missing deployment") + } + if !strings.Contains(err.Error(), "deployment not found") { + t.Errorf("error = %q, want containing 'deployment not found'", err.Error()) + } +} + +func TestSkillsRemove_SkillNotFound(t *testing.T) { + tmpDir := t.TempDir() + cfg := &config.Config{ConfigDir: tmpDir} + + // Create deployment directory but no skills + deployDir := filepath.Join(tmpDir, "applications", appName, "test-id") + if err := os.MkdirAll(deployDir, 0755); err != nil { + t.Fatal(err) + } + + err := SkillsRemove(cfg, "test-id", "nonexistent-skill") + if err == nil { + t.Fatal("expected error for missing skill") + } + if !strings.Contains(err.Error(), "not found") { + t.Errorf("error = %q, want containing 'not found'", err.Error()) + } +} + +func TestSkillsRemove_LastSkill(t *testing.T) { + tmpDir := t.TempDir() + cfg := &config.Config{ConfigDir: tmpDir, BinDir: tmpDir} + id := "test-remove" + + // Create deployment + one skill + skillsDir := filepath.Join(tmpDir, "applications", appName, id, "skills") + skillPath := filepath.Join(skillsDir, "my-skill") + if err := os.MkdirAll(skillPath, 0755); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(skillPath, "SKILL.md"), []byte("---\nname: my-skill\n---\n# My Skill\n"), 0644); err != nil { + t.Fatal(err) + } + + // Remove the only skill — removal succeeds, but reset fails (no cluster/kubectl) + err := SkillsRemove(cfg, id, "my-skill") + + // Skill directory should be gone regardless of reset outcome + if _, statErr := os.Stat(skillPath); !os.IsNotExist(statErr) { + t.Error("skill directory should be removed") + } + + // Skills dir itself should 
still exist (just empty) + entries, readErr := os.ReadDir(skillsDir) + if readErr != nil { + t.Fatalf("failed to read skills dir: %v", readErr) + } + if len(entries) != 0 { + t.Errorf("skills dir should be empty, got %d entries", len(entries)) + } + + // Reset will fail without kubectl/cluster — that's expected in unit tests + if err != nil && !strings.Contains(err.Error(), "failed to reset ConfigMap") { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestSkillsRemove_WithRemainingSkills(t *testing.T) { + tmpDir := t.TempDir() + cfg := &config.Config{ConfigDir: tmpDir} + id := "test-multi" + + // Create deployment + two skills + skillsDir := filepath.Join(tmpDir, "applications", appName, id, "skills") + for _, name := range []string{"skill-a", "skill-b"} { + dir := filepath.Join(skillsDir, name) + if err := os.MkdirAll(dir, 0755); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(dir, "SKILL.md"), []byte("---\nname: "+name+"\n---\n"), 0644); err != nil { + t.Fatal(err) + } + } + + // Remove skill-a — should succeed but sync will fail (no cluster). + // The removal itself should complete; sync failure is expected in tests. 
+ err := SkillsRemove(cfg, id, "skill-a") + + // skill-a should be removed regardless of sync outcome + if _, statErr := os.Stat(filepath.Join(skillsDir, "skill-a")); !os.IsNotExist(statErr) { + t.Error("skill-a directory should be removed") + } + + // skill-b should still exist + if _, statErr := os.Stat(filepath.Join(skillsDir, "skill-b")); os.IsNotExist(statErr) { + t.Error("skill-b directory should still exist") + } + + // Sync will fail without a cluster — that's expected + if err != nil && !strings.Contains(err.Error(), "cluster not running") && + !strings.Contains(err.Error(), "not found") { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestSkillsAdd_MissingDeployment(t *testing.T) { + tmpDir := t.TempDir() + cfg := &config.Config{ConfigDir: tmpDir} + + err := SkillsAdd(cfg, "nonexistent", "some/skill") + if err == nil { + t.Fatal("expected error for missing deployment") + } + if !strings.Contains(err.Error(), "deployment not found") { + t.Errorf("error = %q, want containing 'deployment not found'", err.Error()) + } +} + +func TestSkillsAdd_CreatesManagedDir(t *testing.T) { + tmpDir := t.TempDir() + cfg := &config.Config{ConfigDir: tmpDir} + id := "test-add" + + // Create deployment directory (but not skills subdir) + deployDir := filepath.Join(tmpDir, "applications", appName, id) + if err := os.MkdirAll(deployDir, 0755); err != nil { + t.Fatal(err) + } + + // Clear PATH so findClawHub fails fast (no network access needed) + origPath := os.Getenv("PATH") + os.Setenv("PATH", "") + defer os.Setenv("PATH", origPath) + + // SkillsAdd will fail at findClawHub, but the managed dir + // should have been created before the binary lookup. 
+ _ = SkillsAdd(cfg, id, "some/skill") + + skillsDir := SkillsDir(cfg, id) + if _, err := os.Stat(skillsDir); os.IsNotExist(err) { + t.Error("skills directory should be created even if clawhub is not available") + } +} + +func TestFindClawHub_ReturnsErrorWhenMissing(t *testing.T) { + // Save and clear PATH to ensure neither clawhub nor npx is found + origPath := os.Getenv("PATH") + os.Setenv("PATH", "") + defer os.Setenv("PATH", origPath) + + _, _, err := findClawHub() + if err == nil { + t.Fatal("expected error when clawhub and npx are not in PATH") + } + if !strings.Contains(err.Error(), "clawhub not found") { + t.Errorf("error = %q, want containing 'clawhub not found'", err.Error()) + } +} + +func TestSkillsDirStructure(t *testing.T) { + // Verify the skills dir is nested inside the deployment dir + tmpDir := t.TempDir() + cfg := &config.Config{ConfigDir: tmpDir} + + skillsDir := SkillsDir(cfg, "my-instance") + deployDir := deploymentPath(cfg, "my-instance") + + if !strings.HasPrefix(skillsDir, deployDir) { + t.Errorf("skills dir %q should be under deployment dir %q", skillsDir, deployDir) + } + + // Should end with /skills + if filepath.Base(skillsDir) != "skills" { + t.Errorf("skills dir should end with 'skills', got %q", filepath.Base(skillsDir)) + } +} diff --git a/skills/_tests/smoke_test.py b/skills/_tests/smoke_test.py new file mode 100644 index 0000000..ea39854 --- /dev/null +++ b/skills/_tests/smoke_test.py @@ -0,0 +1,288 @@ +#!/usr/bin/env python3 +"""Smoke tests for OpenClaw skills (obol-blockchain, obol-k8s, obol-dvt). 
+ +Run inside the OpenClaw pod: + obol kubectl exec -i -n openclaw-default deploy/openclaw -- python3 - < skills/_tests/smoke_test.py +""" + +import json +import os +import re +import subprocess +import sys +import urllib.request + +SKILLS_DIR = "/data/.openclaw/skills-injected" +RPC = os.path.join(SKILLS_DIR, "obol-blockchain", "scripts", "rpc.py") +KUBE = os.path.join(SKILLS_DIR, "obol-k8s", "scripts", "kube.py") + +passed = 0 +failed = 0 +errors = [] + + +def test(name, fn): + global passed, failed + try: + fn() + passed += 1 + print(f" \033[32mPASS\033[0m {name}") + except AssertionError as e: + failed += 1 + errors.append((name, str(e))) + print(f" \033[31mFAIL\033[0m {name}: {e}") + except Exception as e: + failed += 1 + errors.append((name, f"unexpected: {e}")) + print(f" \033[31mFAIL\033[0m {name}: unexpected error: {e}") + + +def run(cmd, timeout=30): + r = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout) + return r.returncode, r.stdout.strip(), r.stderr.strip() + + +def http_get(url, timeout=15): + req = urllib.request.Request(url) + with urllib.request.urlopen(req, timeout=timeout) as resp: + return resp.status, json.loads(resp.read()) + + +# ────────────────────────────────────────────── +# obol-blockchain tests +# ────────────────────────────────────────────── +print("\n\033[1m--- obol-blockchain ---\033[0m") + + +def test_blockchain_files(): + for f in [ + "SKILL.md", + "scripts/rpc.py", + "references/erc20-methods.md", + "references/common-contracts.md", + ]: + path = os.path.join(SKILLS_DIR, "obol-blockchain", f) + assert os.path.isfile(path), f"missing: {f}" + + +test("blockchain/files_exist", test_blockchain_files) + + +def test_block_number(): + rc, out, err = run(["python3", RPC, "eth_blockNumber"]) + assert rc == 0, f"exit {rc}: {err}" + assert "Block:" in out, f"unexpected output: {out}" + m = re.search(r"Block:\s+([\d,]+)", out) + assert m, f"no block number found in: {out}" + block = int(m.group(1).replace(",", "")) + 
assert block > 20_000_000, f"block number too low: {block}" + + +test("blockchain/block_number", test_block_number) + + +def test_chain_id(): + rc, out, err = run(["python3", RPC, "eth_chainId"]) + assert rc == 0, f"exit {rc}: {err}" + assert "Chain ID: 1" in out, f"unexpected chain id: {out}" + assert "mainnet" in out, f"missing 'mainnet' in: {out}" + + +test("blockchain/chain_id", test_chain_id) + + +def test_gas_price(): + rc, out, err = run(["python3", RPC, "eth_gasPrice"]) + assert rc == 0, f"exit {rc}: {err}" + assert "Gwei" in out, f"missing 'Gwei' in: {out}" + m = re.search(r"([\d.]+)\s*Gwei", out) + assert m, f"no gwei value in: {out}" + gwei = float(m.group(1)) + assert gwei > 0, f"gas price is 0" + + +test("blockchain/gas_price", test_gas_price) + + +def test_eth_balance(): + vitalik = "0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045" + rc, out, err = run(["python3", RPC, "eth_getBalance", vitalik]) + assert rc == 0, f"exit {rc}: {err}" + assert "ETH" in out, f"missing 'ETH' in: {out}" + m = re.search(r"([\d.]+)\s*ETH", out) + assert m, f"no ETH value in: {out}" + eth = float(m.group(1)) + assert eth > 0, f"balance is 0" + + +test("blockchain/eth_balance", test_eth_balance) + + +def test_erc20_total_supply(): + usdc = "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48" + rc, out, err = run(["python3", RPC, "eth_call", usdc, "0x18160ddd"]) + assert rc == 0, f"exit {rc}: {err}" + assert "Result:" in out or "0x" in out, f"unexpected output: {out}" + + +test("blockchain/erc20_total_supply", test_erc20_total_supply) + + +def test_hoodi_chain_id(): + rc, out, err = run(["python3", RPC, "--network", "evm/560048", "eth_chainId"]) + assert rc == 0, f"exit {rc}: {err}" + assert "560048" in out, f"missing '560048' in: {out}" + assert "hoodi" in out, f"missing 'hoodi' in: {out}" + + +test("blockchain/hoodi_chain_id", test_hoodi_chain_id) + + +# ────────────────────────────────────────────── +# obol-k8s tests +# ────────────────────────────────────────────── +print("\n\033[1m--- 
obol-k8s ---\033[0m") + + +def test_k8s_files(): + for f in ["SKILL.md", "scripts/kube.py"]: + path = os.path.join(SKILLS_DIR, "obol-k8s", f) + assert os.path.isfile(path), f"missing: {f}" + + +test("k8s/files_exist", test_k8s_files) + +# We'll capture pod name from the pods test for use in logs test +_discovered_pod = [None] + + +def test_pods(): + rc, out, err = run(["python3", KUBE, "pods"]) + assert rc == 0, f"exit {rc}: {err}" + assert "openclaw" in out.lower(), f"no openclaw pod in: {out}" + # extract first pod name containing "openclaw" + for line in out.splitlines(): + if "openclaw" in line.lower() and not line.startswith(("-", "NAME", "=")): + _discovered_pod[0] = line.split()[0] + break + + +test("k8s/pods", test_pods) + + +def test_services(): + rc, out, err = run(["python3", KUBE, "services"]) + assert rc == 0, f"exit {rc}: {err}" + assert len(out) > 0, "empty output" + + +test("k8s/services", test_services) + + +def test_deployments(): + rc, out, err = run(["python3", KUBE, "deployments"]) + assert rc == 0, f"exit {rc}: {err}" + assert "openclaw" in out.lower(), f"no openclaw deployment in: {out}" + + +test("k8s/deployments", test_deployments) + + +def test_events(): + rc, out, err = run(["python3", KUBE, "events"]) + assert rc == 0, f"exit {rc}: {err}" + # events may legitimately be empty + + +test("k8s/events", test_events) + + +def test_configmaps(): + rc, out, err = run(["python3", KUBE, "configmaps"]) + assert rc == 0, f"exit {rc}: {err}" + assert "openclaw" in out.lower(), f"no openclaw configmap in: {out}" + + +test("k8s/configmaps", test_configmaps) + + +def test_logs(): + pod = _discovered_pod[0] + assert pod, "no pod discovered from pods test" + rc, out, err = run(["python3", KUBE, "logs", pod, "--tail", "10"]) + assert rc == 0, f"exit {rc}: {err}" + assert len(out) > 0, "empty logs" + + +test("k8s/logs", test_logs) + + +def test_describe_deployment(): + rc, out, err = run(["python3", KUBE, "describe", "deployment", "openclaw"]) + assert rc 
== 0, f"exit {rc}: {err}" + assert "replica" in out.lower() or "Replica" in out, f"no replicas info in: {out[:200]}" + + +test("k8s/describe_deployment", test_describe_deployment) + + +# ────────────────────────────────────────────── +# obol-dvt tests +# ────────────────────────────────────────────── +print("\n\033[1m--- obol-dvt ---\033[0m") + + +def test_dvt_files(): + for f in ["SKILL.md", "references/api-examples.md"]: + path = os.path.join(SKILLS_DIR, "obol-dvt", f) + assert os.path.isfile(path), f"missing: {f}" + + +test("dvt/files_exist", test_dvt_files) + + +def curl_json(url): + """Fetch JSON via curl (matches how the DVT skill documents API access).""" + rc, out, err = run(["curl", "-sf", url]) + assert rc == 0, f"curl failed (exit {rc}): {err}" + return json.loads(out) + + +def test_obol_api_health(): + # _health returns 503 when any sub-check is down, so use curl without -f + rc, out, err = run(["curl", "-s", "https://api.obol.tech/v1/_health"]) + assert rc == 0, f"curl failed: {err}" + data = json.loads(out) + assert "status" in data, f"no status field in: {data}" + # mainnet beacon should be up even if other networks aren't + details = data.get("details", data.get("info", {})) + mainnet = details.get("mainnet beacon node health", {}) + assert mainnet.get("status") == "up", f"mainnet beacon not up: {details}" + + +test("dvt/api_health", test_obol_api_health) + + +def test_network_summary(): + data = curl_json("https://api.obol.tech/v1/lock/network/summary/mainnet") + assert isinstance(data, dict), f"expected dict, got {type(data)}" + clusters = data.get("total_clusters", data.get("totalClusters", 0)) + assert clusters > 0, f"total_clusters is 0 or missing: {data}" + + +test("dvt/network_summary", test_network_summary) + + +# ────────────────────────────────────────────── +# Summary +# ────────────────────────────────────────────── +print(f"\n{'='*50}") +if errors: + print("\nFailures:") + for name, msg in errors: + print(f" - {name}: {msg}") + print() 
+ +total = passed + failed +print(f"Results: \033[32m{passed} passed\033[0m, \033[31m{failed} failed\033[0m, {total} total") +sys.exit(1 if failed else 0) diff --git a/skills/obol-blockchain/SKILL.md b/skills/obol-blockchain/SKILL.md new file mode 100644 index 0000000..c01a5a9 --- /dev/null +++ b/skills/obol-blockchain/SKILL.md @@ -0,0 +1,194 @@ +--- +name: obol-blockchain +description: "Blockchain RPC and Ethereum operations via local eRPC gateway. Use when: querying blocks, balances, transactions, contract state, token balances, ENS names, gas prices, or any eth_* method. Handles JSON-RPC encoding, hex conversion, and ABI decoding. Routes all queries through the in-cluster eRPC load balancer. NOT for: sending transactions, deploying contracts, DVT/validator operations (use obol-dvt), or Kubernetes operations (use obol-k8s)." +metadata: { "openclaw": { "emoji": "⛓️", "requires": { "bins": ["curl", "python3"] } } } +--- + +# Obol Blockchain + +Query Ethereum blockchain state through the local eRPC gateway. Covers raw JSON-RPC methods, ERC-20 token operations, ENS resolution, gas estimation, and transaction analysis. + +## Paths + +All script paths in this document are relative to this skill's directory. When running from the pod, prefix with the skill's installed location: + +```bash +python3 /data/.openclaw/skills-injected/obol-blockchain/scripts/rpc.py eth_blockNumber +``` + +## When to Use + +- "What's the latest block number?" +- "Check balance of 0x..." +- "Read contract state / call a view function" +- "What are the token balances for this address?" 
+- "Resolve an ENS name" +- "Estimate gas for this call" +- "Look up a transaction receipt" +- Any `eth_*`, `net_*`, or `web3_*` JSON-RPC method + +## When NOT to Use + +- Sending transactions or signing — no private keys available (read-only) +- Deploying contracts — no write access +- DVT cluster monitoring — use `obol-dvt` +- Kubernetes pod health — use `obol-k8s` + +## Environment + +The eRPC gateway supports two URL path formats: + +``` +Alias: http://erpc.erpc.svc.cluster.local:4000/rpc/{alias} e.g. /rpc/mainnet +Explicit: http://erpc.erpc.svc.cluster.local:4000/rpc/evm/{chainId} e.g. /rpc/evm/1 +``` + +`mainnet` alias is always configured. Other network aliases (e.g. `hoodi`) are only available if that Ethereum network has been installed. As a fallback, you can use the explicit `evm/{chainId}` format — for example `/rpc/evm/560048` for Hoodi. + +The helper script defaults to `mainnet`. Override with `--network` flag or `ERPC_NETWORK` env var. The script accepts both aliases (`mainnet`) and explicit paths (`evm/560048`). 
+ +## Quick Start + +```bash +# Block number (mainnet default) +python3 scripts/rpc.py eth_blockNumber + +# Block number on hoodi testnet (use evm/chainId if alias not configured) +python3 scripts/rpc.py --network hoodi eth_blockNumber +python3 scripts/rpc.py --network evm/560048 eth_blockNumber + +# Balance (returns ETH) +python3 scripts/rpc.py eth_getBalance 0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045 + +# Gas price (returns Gwei) +python3 scripts/rpc.py eth_gasPrice + +# Chain ID +python3 scripts/rpc.py eth_chainId + +# Contract read (ERC-20 totalSupply) +python3 scripts/rpc.py eth_call 0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48 0x18160ddd +``` + +## JSON-RPC Methods + +| Method | Params | Returns | +|--------|--------|---------| +| `eth_blockNumber` | none | Latest block number | +| `eth_getBalance` | `address [block]` | Balance in wei (script converts to ETH) | +| `eth_gasPrice` | none | Gas price in wei (script converts to Gwei) | +| `eth_chainId` | none | Chain ID (1=mainnet, 560048=hoodi) | +| `eth_getBlockByNumber` | `blockNum includeTxs` | Block data | +| `eth_getTransactionByHash` | `txHash` | Transaction details | +| `eth_getTransactionReceipt` | `txHash` | Receipt with logs and status | +| `eth_call` | `to data [block]` | Contract read result | +| `eth_estimateGas` | `to data [from] [value]` | Gas estimate | +| `eth_getLogs` | `fromBlock toBlock [address] [topic0]` | Event logs | +| `net_version` | none | Network ID | + +## Token Operations + +Read ERC-20 token state using `eth_call` with the contract address and function selector. 
+ +### Check Token Balance + +```bash +# balanceOf(address) selector: 0x70a08231 +# Pad address to 32 bytes (left-pad with zeros, remove 0x prefix) +python3 scripts/rpc.py eth_call \ + 0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48 \ + 0x70a08231000000000000000000000000d8dA6BF26964aF9D7eEd9e03E53415D37aA96045 +``` + +### Get Token Info + +```bash +# name() -> 0x06fdde03 +python3 scripts/rpc.py eth_call 0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48 0x06fdde03 + +# symbol() -> 0x95d89b41 +python3 scripts/rpc.py eth_call 0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48 0x95d89b41 + +# decimals() -> 0x313ce567 +python3 scripts/rpc.py eth_call 0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48 0x313ce567 + +# totalSupply() -> 0x18160ddd +python3 scripts/rpc.py eth_call 0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48 0x18160ddd +``` + +See `references/erc20-methods.md` for the complete function selector reference and ABI encoding guide. + +## ENS Resolution (Mainnet Only) + +ENS names resolve through the ENS registry at `0x00000000000C2E074eC69A0dFb2997BA6C7d2e1e`. + +```bash +# Step 1: Get resolver for a name (namehash required) +# Step 2: Call resolver.addr(namehash) to get the address + +# For common names, use the public resolver directly: +# PublicResolver: 0x231b0Ee14048e9dCcD1d247744d114a4EB5E8E63 +# addr(bytes32 node) selector: 0x3b3b57de +``` + +ENS resolution requires computing the namehash using **Keccak-256** (Ethereum's hash function). + +**Warning:** Python's `hashlib.sha3_256` is NIST SHA-3, NOT Keccak-256. They use different internal padding and produce different outputs. Do not use `hashlib.sha3_256` for ENS namehash — it will return wrong results. + +Computing namehash correctly requires a Keccak-256 library (e.g., `pysha3`, `pycryptodome`, or `ethers.js`). Since these aren't available in the pod, ENS resolution is limited to names with known namehashes or external lookup services. 
+ +## Gas Estimation + +```bash +# Estimate gas for a transfer +python3 scripts/rpc.py eth_estimateGas \ + 0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48 \ + 0xa9059cbb000000000000000000000000... + +# Current gas price +python3 scripts/rpc.py eth_gasPrice + +# Total cost estimate: gasEstimate * gasPrice +``` + +## Transaction Analysis + +```bash +# Get transaction details +python3 scripts/rpc.py eth_getTransactionByHash 0xabc123... + +# Get receipt with logs +python3 scripts/rpc.py eth_getTransactionReceipt 0xabc123... +``` + +Receipt fields: `status` (0x1=success, 0x0=revert), `gasUsed`, `logs[]` (events emitted). + +## Direct curl + +When the helper script doesn't cover a method or you need custom params: + +```bash +# Mainnet +curl -s -X POST "$ERPC_URL/mainnet" \ + -H "Content-Type: application/json" \ + -d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \ + | python3 -c "import sys,json; r=json.load(sys.stdin); print(int(r['result'],16) if 'result' in r else r)" + +# Hoodi testnet (alias — requires hoodi network installed) +curl -s -X POST "$ERPC_URL/hoodi" \ + -H "Content-Type: application/json" \ + -d '{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}' + +# Hoodi testnet (explicit chain ID — valid URL, but requires hoodi network installed) +curl -s -X POST "$ERPC_URL/evm/560048" \ + -H "Content-Type: application/json" \ + -d '{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}' +``` + +## Constraints + +- **Read-only** — no private keys, no transaction signing, no state mutations +- **Local routing** — always use eRPC (`$ERPC_URL`), never call external RPC providers directly +- **Hex encoding** — JSON-RPC uses hex for numbers and bytes; the helper script converts common cases +- **Block parameter** — `latest` (default), `earliest`, `pending`, or hex block number +- See `references/common-contracts.md` for well-known contract addresses diff --git a/skills/obol-blockchain/references/common-contracts.md 
b/skills/obol-blockchain/references/common-contracts.md new file mode 100644 index 0000000..9227956 --- /dev/null +++ b/skills/obol-blockchain/references/common-contracts.md @@ -0,0 +1,66 @@ +# Common Contract Addresses + +## Mainnet (Chain ID: 1) + +### Tokens + +| Token | Address | Decimals | +|-------|---------|----------| +| WETH | `0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2` | 18 | +| USDC | `0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48` | 6 | +| USDT | `0xdAC17F958D2ee523a2206206994597C13D831ec7` | 6 | +| DAI | `0x6B175474E89094C44Da98b954EedeAC495271d0F` | 18 | +| WBTC | `0x2260FAC5E5542a773Aa44fBCfeDf7C193bc2C599` | 8 | +| stETH | `0xae7ab96520DE3A18E5e111B5EaAb095312D7fE84` | 18 | +| wstETH | `0x7f39C581F595B53c5cb19bD0b3f8dA6c935E2Ca0` | 18 | + +### DeFi Protocols + +| Protocol | Contract | Address | +|----------|----------|---------| +| Uniswap V2 | Router | `0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D` | +| Uniswap V3 | Router | `0xE592427A0AEce92De3Edee1F18E0157C05861564` | +| Uniswap V3 | Quoter | `0xb27308f9F90D607463bb33eA1BeBb41C27CE5AB6` | + +### ENS + +| Contract | Address | +|----------|---------| +| ENS Registry | `0x00000000000C2E074eC69A0dFb2997BA6C7d2e1e` | +| Public Resolver | `0x231b0Ee14048e9dCcD1d247744d114a4EB5E8E63` | +| Reverse Registrar | `0xa58E81fe9b61B5c3fE2AFD33CF304c454AbFc7Cb` | + +### Obol Network + +| Contract | Address | +|----------|---------| +| Obol Token (OBOL) | `0x0B010000b7624eb9B3DfBC279673C76E9D29D5F7` | + +## Hoodi Testnet (Chain ID: 560048) + +Hoodi is a newer testnet. Contract addresses may differ from mainnet. Use `eth_chainId` to confirm you're on the right network before querying. 
+ +## Quick Queries + +### Check if an address is a contract + +```bash +python3 scripts/rpc.py eth_getCode 0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48 latest +# Returns bytecode if contract, "0x" if EOA +``` + +### Get USDC balance + +```bash +python3 scripts/rpc.py eth_call \ + 0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48 \ + 0x70a08231000000000000000000000000d8dA6BF26964aF9D7eEd9e03E53415D37aA96045 +# Result is in 6 decimals: divide by 1e6 +``` + +### Get ETH balance + +```bash +python3 scripts/rpc.py eth_getBalance 0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045
+# Result is auto-converted to ETH +``` diff --git a/skills/obol-blockchain/references/erc20-methods.md b/skills/obol-blockchain/references/erc20-methods.md new file mode 100644 index 0000000..9b9e109 --- /dev/null +++ b/skills/obol-blockchain/references/erc20-methods.md @@ -0,0 +1,91 @@ +# ERC-20 Function Selectors & ABI Encoding + +## Standard ERC-20 Functions + +| Function | Selector | Params | Returns | +|----------|----------|--------|---------| +| `name()` | `0x06fdde03` | none | string | +| `symbol()` | `0x95d89b41` | none | string | +| `decimals()` | `0x313ce567` | none | uint8 | +| `totalSupply()` | `0x18160ddd` | none | uint256 | +| `balanceOf(address)` | `0x70a08231` | owner address | uint256 | +| `transfer(address,uint256)` | `0xa9059cbb` | to, amount | bool | +| `approve(address,uint256)` | `0x095ea7b3` | spender, amount | bool | +| `allowance(address,address)` | `0xdd62ed3e` | owner, spender | uint256 | +| `transferFrom(address,address,uint256)` | `0x23b872dd` | from, to, amount | bool | + +## Event Signatures + +| Event | Topic0 | +|-------|--------| +| `Transfer(address,address,uint256)` | `0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef` | +| `Approval(address,address,uint256)` | `0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925` | + +## ABI Encoding Guide + +### Address Encoding + +Addresses are left-padded to 32 bytes (64 hex chars): + +``` +0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045 +becomes: +000000000000000000000000d8dA6BF26964aF9D7eEd9e03E53415D37aA96045 +``` + +### Building eth_call Data + +Concatenate the function selector (4 bytes) with encoded parameters (32 bytes each): + +``` +balanceOf(0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045): + +data = 0x70a08231 (selector) + + 000000000000000000000000d8dA6BF26964aF9D7eEd9e03E53415D37aA96045 (address) + += 0x70a08231000000000000000000000000d8dA6BF26964aF9D7eEd9e03E53415D37aA96045 +``` + +### Multiple Parameters + +``` +allowance(owner, spender): + +data = 
0xdd62ed3e (selector) + + 000000000000000000000000<owner address, 40 hex chars> (owner) + + 000000000000000000000000<spender address, 40 hex chars> (spender) +``` + +### Decoding Return Values + +- **uint256**: Hex string, 32 bytes. Convert to decimal: `int(result, 16)` +- **bool**: `0x...01` = true, `0x...00` = false +- **string**: ABI-encoded with offset + length + data (complex, use python3) +- **address**: Last 20 bytes of 32-byte value + +### Decoding Token Amounts + +Always check `decimals()` first: + +```python +raw = int(result, 16) # raw balance from balanceOf +decimals = int(dec_result, 16) # from decimals() +balance = raw / (10 ** decimals) +``` + +Common decimals: USDC/USDT = 6, DAI/WETH = 18. + +## Example: Full Token Query + +```bash +# 1. Get token name +python3 scripts/rpc.py eth_call 0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48 0x06fdde03 + +# 2. Get decimals +python3 scripts/rpc.py eth_call 0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48 0x313ce567 + +# 3. Get balance for vitalik.eth +python3 scripts/rpc.py eth_call \ + 0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48 \ + 0x70a08231000000000000000000000000d8dA6BF26964aF9D7eEd9e03E53415D37aA96045 +``` diff --git a/skills/obol-blockchain/scripts/rpc.py b/skills/obol-blockchain/scripts/rpc.py new file mode 100644 index 0000000..3a55fce --- /dev/null +++ b/skills/obol-blockchain/scripts/rpc.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python3 +"""JSON-RPC helper for Obol Stack eRPC gateway. + +Usage: + python3 rpc.py <method> [param1] [param2] ... + python3 rpc.py --network hoodi <method> [param1] ... + +Examples: + python3 rpc.py eth_blockNumber + python3 rpc.py eth_getBalance 0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045 + python3 rpc.py eth_call 0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48 0x18160ddd + python3 rpc.py --network hoodi eth_blockNumber +""" + +import json +import os +import sys +import urllib.request + +# eRPC requires /rpc/{network} path. ERPC_URL is the base (without network).
+ERPC_BASE = os.environ.get("ERPC_URL", "http://erpc.erpc.svc.cluster.local:4000/rpc") +DEFAULT_NETWORK = os.environ.get("ERPC_NETWORK", "mainnet") + +# Methods that take no params +NO_PARAM_METHODS = {"eth_blockNumber", "eth_gasPrice", "eth_chainId", "net_version", "web3_clientVersion"} + + +def rpc_call(method, params=None, network=None): + """Send a JSON-RPC request and return the result.""" + net = network or DEFAULT_NETWORK + url = f"{ERPC_BASE}/{net}" + + payload = json.dumps({ + "jsonrpc": "2.0", + "method": method, + "params": params or [], + "id": 1, + }).encode() + + req = urllib.request.Request( + url, + data=payload, + headers={"Content-Type": "application/json"}, + ) + + with urllib.request.urlopen(req, timeout=30) as resp: + data = json.loads(resp.read()) + + if "error" in data: + code = data["error"].get("code", "?") + msg = data["error"].get("message", "unknown error") + print(f"RPC error {code}: {msg}", file=sys.stderr) + sys.exit(1) + + return data.get("result") + + +def hex_to_int(val): + """Convert hex string to int.""" + if isinstance(val, str) and val.startswith("0x"): + return int(val, 16) + return val + + +def format_result(method, result): + """Format result based on method for human readability.""" + if result is None: + print("null") + return + + if method == "eth_blockNumber": + block = hex_to_int(result) + print(f"Block: {block:,} (0x{block:x})") + + elif method == "eth_getBalance": + wei = hex_to_int(result) + eth = wei / 1e18 + print(f"Balance: {eth:.6f} ETH ({wei:,} wei)") + + elif method == "eth_gasPrice": + wei = hex_to_int(result) + gwei = wei / 1e9 + print(f"Gas price: {gwei:.2f} Gwei ({wei:,} wei)") + + elif method == "eth_chainId": + chain_id = hex_to_int(result) + names = {1: "mainnet", 560048: "hoodi", 11155111: "sepolia"} + name = names.get(chain_id, "unknown") + print(f"Chain ID: {chain_id} ({name})") + + elif method == "eth_estimateGas": + gas = hex_to_int(result) + print(f"Gas estimate: {gas:,}") + + elif 
isinstance(result, str) and result.startswith("0x") and len(result) <= 66: + # Short hex result — show both hex and decimal + val = hex_to_int(result) + print(f"Result: {val} (0x{val:x})") + + elif isinstance(result, (dict, list)): + print(json.dumps(result, indent=2)) + + else: + print(result) + + +def build_params(method, args): + """Build JSON-RPC params array from CLI arguments.""" + if method in NO_PARAM_METHODS: + return [] + + if method == "eth_getBalance": + addr = args[0] if args else "0x0" + block = args[1] if len(args) > 1 else "latest" + return [addr, block] + + if method == "eth_getBlockByNumber": + block = args[0] if args else "latest" + include_txs = args[1].lower() == "true" if len(args) > 1 else False + return [block, include_txs] + + if method in ("eth_getTransactionByHash", "eth_getTransactionReceipt"): + return [args[0]] if args else [] + + if method == "eth_call": + to_addr = args[0] if args else "0x0" + data = args[1] if len(args) > 1 else "0x" + block = args[2] if len(args) > 2 else "latest" + return [{"to": to_addr, "data": data}, block] + + if method == "eth_estimateGas": + to_addr = args[0] if args else "0x0" + data = args[1] if len(args) > 1 else "0x" + obj = {"to": to_addr, "data": data} + if len(args) > 2: + obj["from"] = args[2] + if len(args) > 3: + obj["value"] = args[3] + return [obj] + + if method == "eth_getLogs": + from_block = args[0] if args else "latest" + to_block = args[1] if len(args) > 1 else "latest" + log_filter = {"fromBlock": from_block, "toBlock": to_block} + if len(args) > 2: + log_filter["address"] = args[2] + if len(args) > 3: + log_filter["topics"] = [args[3]] + return [log_filter] + + # Fallback: pass args as-is + return list(args) + + +def main(): + argv = sys.argv[1:] + + # Parse --network flag + network = None + if "--network" in argv: + idx = argv.index("--network") + if idx + 1 < len(argv): + network = argv[idx + 1] + argv = argv[:idx] + argv[idx + 2:] + else: + print("Error: --network requires a value 
(mainnet, hoodi, sepolia)", file=sys.stderr) + sys.exit(1) + + if not argv: + net = network or DEFAULT_NETWORK + print(f"Usage: python3 rpc.py [--network NAME] [param1] [param2] ...") + print(f"\nEndpoint: {ERPC_BASE}/{net}") + print(f"Network: {net}") + print("\nCommon methods:") + print(" eth_blockNumber") + print(" eth_getBalance
[block]") + print(" eth_gasPrice") + print(" eth_chainId") + print(" eth_call [block]") + print(" eth_getLogs [address] [topic0]") + print(" eth_getTransactionReceipt ") + sys.exit(1) + + method = argv[0] + args = argv[1:] + params = build_params(method, args) + result = rpc_call(method, params, network=network) + format_result(method, result) + + +if __name__ == "__main__": + main() diff --git a/skills/obol-dvt/SKILL.md b/skills/obol-dvt/SKILL.md new file mode 100644 index 0000000..512acf7 --- /dev/null +++ b/skills/obol-dvt/SKILL.md @@ -0,0 +1,255 @@ +--- +name: obol-dvt +description: "Distributed Validator (DVT) cluster monitoring, operator management, and exit coordination via Obol Network API. Use when: querying DVT clusters, checking validator performance, investigating operator status, coordinating exits, or discussing Obol/Charon/DKG concepts. Uses mcporter MCP tools if configured, falls back to direct Obol API calls via curl. NOT for: creating clusters, running DKG, or submitting exits (write operations)." +metadata: { "openclaw": { "emoji": "🔱", "requires": { "bins": ["curl"] } } } +--- + +# Obol Distributed Validator (DV) Skill + +Query and monitor Distributed Validators on the Obol Network. Covers cluster health, operator management, exit coordination, and DVT concepts. + +--- + +## What is a Distributed Validator? + +A **Distributed Validator (DV)** is an Ethereum validator whose private key is never held +by a single party. Instead, the signing key is split across a group of **operators** using +threshold BLS cryptography. Any `threshold-of-N` operators must cooperate to produce a valid +signature — so the validator keeps attesting even if some operators go offline, and no single +operator can act maliciously on their own. + +Obol's open-source middleware is called **Charon**. Each operator runs a Charon client +alongside their validator client (e.g., Lighthouse, Teku). 
Charon handles the consensus +protocol between operators so the validator appears as a single validator to the beacon chain. + +| Term | Meaning | +|------|---------| +| **Cluster** | A group of N operators running DVs together | +| **Threshold** | Minimum operators needed to sign (e.g., 3-of-4) | +| **DKG** | Distributed Key Generation — operators collaboratively create the shared key without anyone seeing the full private key | +| **Cluster Definition** | Pre-DKG proposal: who the operators are, how many validators, which network | +| **Cluster Lock** | Post-DKG artifact: locked configuration + generated validator public keys; identified by `lock_hash` | +| **config_hash** | Hash of the cluster definition (pre-DKG); also embedded in the lock | +| **lock_hash** | Hash of the cluster lock (post-DKG); the primary identifier for a running cluster | +| **Operator** | An Ethereum address that participates in one or more DV clusters | +| **Techne** | Obol's operator reputation system: base > bronze > silver > gold | +| **OWR** | Optimistic Withdrawal Recipient — a smart contract that splits validator rewards | + +--- + +## Cluster Lifecycle + +``` +[Cluster Definition] -> operators agree on config, sign T&Cs + | + [DKG Ceremony] -> Charon nodes exchange key shares; no full key ever assembled + | + [Cluster Lock] -> validator pubkeys generated; cluster is live on beacon chain + | + [Active Validators] -> attesting, proposing blocks, earning rewards + | + [Exit Coordination] -> operators sign exit messages; broadcast when threshold reached +``` + +When a user provides a `config_hash`, they are referring to something at or before DKG. +When they provide a `lock_hash`, the cluster has completed DKG and may have active validators. 
+ +--- + +## API Access + +**Base URL:** `https://api.obol.tech` (public, no authentication needed) + +### Preferred: mcporter MCP tools + +If mcporter is configured with the obol-mcp server: + +```bash +mcporter call obol.obol_cluster_lock_by_hash lock_hash=0x4d6e7f8a... +mcporter call obol.obol_cluster_effectiveness lock_hash=0x4d6e7f8a... +``` + +Check availability: `mcporter list obol 2>/dev/null` + +### Fallback: Direct curl + +```bash +# Helper function for Obol API calls +obol_api() { + curl -s "https://api.obol.tech$1" | python3 -c "import sys,json; print(json.dumps(json.load(sys.stdin),indent=2))" +} + +# Example +obol_api "/v1/lock/0x4d6e7f8a..." +``` + +--- + +## Tool Selection Guide + +### "I have a lock_hash" + +| Goal | API Endpoint | curl | +|------|-------------|------| +| Cluster config | `GET /v1/lock/{lockHash}` | `curl -s "https://api.obol.tech/v1/lock/0x..."` | +| Validator performance | `GET /v1/effectiveness/{lockHash}` | `curl -s "https://api.obol.tech/v1/effectiveness/0x..."` | +| Validator beacon states | `GET /v1/state/{lockHash}` | `curl -s "https://api.obol.tech/v1/state/0x..."` | +| Exit status summary | `GET /v1/exp/exit/status/summary/{lockHash}` | `curl -s "https://api.obol.tech/v1/exp/exit/status/summary/0x..."` | +| Detailed exit status | `GET /v1/exp/exit/status/{lockHash}` | `curl -s "https://api.obol.tech/v1/exp/exit/status/0x..."` | + +### "I have a config_hash" + +| Goal | API Endpoint | curl | +|------|-------------|------| +| Pre-DKG definition | `GET /v1/definition/{configHash}` | `curl -s "https://api.obol.tech/v1/definition/0x..."` | +| Cluster lock (if DKG done) | `GET /v1/lock/configHash/{configHash}` | `curl -s "https://api.obol.tech/v1/lock/configHash/0x..."` | + +### "I have an operator address (0x...)" + +| Goal | API Endpoint | +|------|-------------| +| All clusters | `GET /v1/lock/operator/{address}` | +| Cluster definitions | `GET /v1/definition/operator/{address}` | +| Badges (Lido, EtherFi) | `GET 
/v1/address/badges/{address}` | +| Techne credential level | `GET /v1/address/techne/{address}` | +| Token incentives | `GET /v1/address/incentives/{network}/{address}` | +| T&Cs signed? | `GET /v1/termsAndConditions/{address}` | + +### "I want to explore a network (mainnet / holesky / sepolia)" + +| Goal | API Endpoint | +|------|-------------| +| All clusters | `GET /v1/lock/network/{network}` | +| Network statistics | `GET /v1/lock/network/summary/{network}` | +| Search clusters | `GET /v1/lock/search/{network}?q=...` | +| All operators | `GET /v1/address/network/{network}` | +| Search operators | `GET /v1/address/search/{network}?q=...` | + +### Other + +| Goal | API Endpoint | +|------|-------------| +| Migrateable validators | `GET /v1/address/migrateable-validators/{network}/{withdrawalAddress}` | +| OWR tranches | `GET /v1/owr/{network}/{address}` | +| API health | `GET /v1/_health` | + +--- + +## Common Workflows + +### Investigate a cluster's health + +```bash +# 1. Get cluster config (nested under cluster_definition) +curl -s "https://api.obol.tech/v1/lock/0x4d6e7f8a..." | python3 -c " +import sys,json; d=json.load(sys.stdin) +cd = d.get('cluster_definition', {}) +ops = cd.get('operators', []) +dvs = d.get('distributed_validators', []) +print(f'Cluster: {cd.get(\"name\",\"?\")}') +print(f'Threshold: {cd.get(\"threshold\",\"?\")}-of-{len(ops)}') +print(f'Validators: {len(dvs)}') +" + +# 2. Check effectiveness (dict keyed by pubkey, time-period scores) +curl -s "https://api.obol.tech/v1/effectiveness/0x4d6e7f8a..." | python3 -c " +import sys,json; d=json.load(sys.stdin) +for pk, scores in d.items(): + eff = scores.get('sevenDay', 0) + status = 'healthy' if eff > 0.95 else 'degraded' if eff > 0.8 else 'CRITICAL' + print(f'{pk[:16]}... 7d={eff:.3f} [{status}]') +" + +# 3. Check validator states (dict keyed by pubkey, balance in ETH) +curl -s "https://api.obol.tech/v1/state/0x4d6e7f8a..." 
| python3 -c " +import sys,json; d=json.load(sys.stdin) +for pk, v in d.items(): + print(f'{pk[:16]}... {v.get(\"status\",\"?\")} {v.get(\"balance\",\"?\")} ETH') +" +``` + +If effectiveness is low: one or more operators offline, misconfigured Charon, network latency, or a validator stuck in `exiting` state. + +### Coordinate a voluntary exit + +```bash +# 1. Exit status summary +curl -s "https://api.obol.tech/v1/exp/exit/status/summary/0x..." | python3 -c " +import sys,json; d=json.load(sys.stdin) +print(f'Ready to exit: {d.get(\"ready_exits\", 0)}') +for addr, count in d.get('operator_exits', {}).items(): + print(f' {addr[:12]}... signed: {count}') +" +``` + +Exit is broadcast automatically once `threshold` operators have submitted their exit signature shares. + +### Audit an operator + +```bash +# Techne level (response is tiers with arrays) +curl -s "https://api.obol.tech/v1/address/techne/0xAbCd..." | python3 -c " +import sys,json; d=json.load(sys.stdin) +for tier in ['gold', 'silver', 'bronze', 'base']: + if d.get(tier): + print(f'Level: {tier} (earned {d[tier][0].get(\"earned_at\",\"?\")})') + break +else: + print('Level: none') +" + +# Badges (qualified=true means earned) +curl -s "https://api.obol.tech/v1/address/badges/0xAbCd..." | python3 -c " +import sys,json; d=json.load(sys.stdin) +earned = [b['name'] for b in d.get('badges',[]) if b.get('qualified')] +print(f'Badges: {earned if earned else \"none\"}')" + +# Cluster count (paginated response) +curl -s "https://api.obol.tech/v1/lock/operator/0xAbCd..." | python3 -c " +import sys,json; d=json.load(sys.stdin) +print(f'Active in {d.get(\"total_count\", len(d.get(\"cluster_locks\", [])))} cluster(s)')" + +# T&Cs signed? +curl -s "https://api.obol.tech/v1/termsAndConditions/0xAbCd..." 
| python3 -c " +import sys,json; d=json.load(sys.stdin) +print(f'T&Cs signed: {d.get(\"isTermsAndConditionsSigned\", False)}')" +``` + +--- + +## How to Talk About DVs + +**Do say:** +- "Your cluster has 4 operators with a 3-of-4 threshold, so it tolerates one operator going offline." +- "The cluster lock (identified by its `lock_hash`) is the source of truth after DKG." +- "Effectiveness of 0.95 means the validator is attesting in ~95% of expected slots." +- "Exit coordination requires threshold operators to submit their key shares of the exit message." + +**Avoid:** +- Saying the private key is "split into pieces" — it's threshold cryptography; no full key is ever assembled. +- Saying a cluster "fails" if one operator goes offline — it degrades gracefully until the threshold is not met. +- Confusing `config_hash` (pre-DKG) with `lock_hash` (post-DKG). + +## Identifier Formats + +- `lock_hash` and `config_hash`: hex strings starting with `0x`, typically 66 characters +- Operator `address`: standard Ethereum address, `0x` + 40 hex chars +- Validator `pubkey`: BLS public key, `0x` + 96 hex chars + +All identifiers are **case-sensitive** in Obol API calls. If a user provides an address without `0x`, remind them to include it. 
+ +**Networks:** `mainnet` (real ETH), `hoodi` (staking/infra testnet, successor to holesky), `holesky` (legacy testnet), `sepolia` (secondary testnet) + +**Validator status values:** `active_ongoing`, `active_exiting`, `active_slashed`, `exited_unslashed`, `exited_slashed`, `withdrawal_possible`, `withdrawal_done`, `pending_*` + +## Examples + +For parameter shapes, response field reference, and example conversation patterns, see: +`references/api-examples.md` + +## Limitations + +- All API calls are **read-only** — creating clusters, running DKG, and submitting exits require authenticated POST endpoints +- Exit status endpoints are under `/v1/exp/` (experimental) — pagination is 1-indexed +- API rate limits apply; if timeouts occur, check `GET /v1/_health` first +- mcporter MCP integration requires the obol-mcp server to be installed (pip not available in pod currently) diff --git a/skills/obol-dvt/references/api-examples.md b/skills/obol-dvt/references/api-examples.md new file mode 100644 index 0000000..a898352 --- /dev/null +++ b/skills/obol-dvt/references/api-examples.md @@ -0,0 +1,326 @@ +# Obol API — Example Calls & Responses + +Illustrative examples of API parameters and the key fields returned. +Use these to interpret real API responses and explain them to users. + +--- + +## Cluster Lock — `GET /v1/lock/{lockHash}` + +**curl:** +```bash +curl -s "https://api.obol.tech/v1/lock/0x4d6e7f8a9b..." +``` + +**Response shape (key fields):** +```json +{ + "cluster_definition": { + "name": "my-dv-cluster", + "uuid": "abc123...", + "version": "v1.8.0", + "num_validators": 4, + "threshold": 3, + "config_hash": "0x1a2b3c4d5e...", + "fork_version": "0x00000000", + "timestamp": "2024-03-15T10:22:00Z", + "operators": [ + { "address": "0xAbCd...1234", "enr": "enr:-...", "enr_signature": "0x...", "config_signature": "0x..." } + ], + "validators": [ + { "fee_recipient_address": "0xDead...Beef", "withdrawal_address": "0x..." 
} + ] + }, + "distributed_validators": [ + { "distributed_public_key": "0xb3a2c1...", "public_shares": ["0x...", "0x..."] } + ], + "lock_hash": "0x4d6e7f8a9b..." +} +``` + +**What to tell the user:** "Your cluster has 4 operators with a 3-of-4 threshold. It was created +on 2024-03-15. Validator public keys are in `distributed_validators[].distributed_public_key`." + +**Key access patterns:** +- Cluster name: `d["cluster_definition"]["name"]` +- Threshold: `d["cluster_definition"]["threshold"]` +- Operators: `d["cluster_definition"]["operators"]` +- Validator pubkeys: `[v["distributed_public_key"] for v in d["distributed_validators"]]` +- Lock hash: `d["lock_hash"]` + +--- + +## Effectiveness — `GET /v1/effectiveness/{lockHash}` + +**curl:** +```bash +curl -s "https://api.obol.tech/v1/effectiveness/0x4d6e7f8a9b..." +``` + +**Response shape:** +```json +{ + "0xb3a2c1...": { + "oneDay": 0.998, + "sevenDay": 0.995, + "thirtyDay": 0.991, + "all": 0.987 + }, + "0xc4d3e2...": { + "oneDay": 0.0, + "sevenDay": 0.612, + "thirtyDay": 0.608, + "all": 0.550 + } +} +``` + +**Notes:** +- Response is a dict keyed by validator public key (not an array). +- Each entry has time-period scores: `oneDay`, `sevenDay`, `thirtyDay`, `all`. +- Scores are 0–1; anything above ~0.95 is healthy. +- A `oneDay` of 0.0 with non-zero `sevenDay` usually means a recent outage. +- Low scores usually mean one or more operators are offline or have connectivity issues. + +**Parsing:** +```python +for pubkey, scores in d.items(): + eff = scores.get('sevenDay', 0) + status = 'healthy' if eff > 0.95 else 'degraded' if eff > 0.8 else 'CRITICAL' + print(f'{pubkey[:16]}... 7d={eff:.3f} [{status}]') +``` + +--- + +## Validator States — `GET /v1/state/{lockHash}` + +**curl:** +```bash +curl -s "https://api.obol.tech/v1/state/0x4d6e7f8a9b..." 
+``` + +**Response shape:** +```json +{ + "0xb3a2c1...": { + "index": "412503", + "status": "active_ongoing", + "balance": "32.045231042", + "effective_balance": "32.0", + "withdrawal_credentials": "0x01..." + }, + "0xc4d3e2...": { + "index": "412504", + "status": "active_exiting", + "balance": "32.001000000", + "effective_balance": "32.0", + "withdrawal_credentials": "0x01..." + } +} +``` + +**Notes:** +- Response is a dict keyed by validator public key. +- `balance` is in ETH (decimal string), not Gwei. +- `active_exiting` means an exit has been initiated; pair with exit status summary to see signing progress. +- May return `{}` if the cluster has no active validators on the beacon chain. + +--- + +## Exit Status Summary — `GET /v1/exp/exit/status/summary/{lockHash}` + +**curl:** +```bash +curl -s "https://api.obol.tech/v1/exp/exit/status/summary/0x4d6e7f8a9b..." +``` + +**Response shape:** +```json +{ + "operator_exits": { + "0xAbCd...1234": 3, + "0xEfGh...5678": 3, + "0xIjKl...9012": 2, + "0xMnOp...3456": 1 + }, + "ready_exits": 1 +} +``` + +**What to tell the user:** "1 validator has reached the signing threshold and is ready to exit. +Operators `0xIjKl...` and `0xMnOp...` have signed fewer exits than the others — they need to +sign more to unlock the remaining validators." + +**Key access patterns:** +- Ready count: `d["ready_exits"]` +- Per-operator signed count: `d["operator_exits"]` (dict of address → count) + +--- + +## Detailed Exit Status — `GET /v1/exp/exit/status/{lockHash}` + +**curl (filtered by validator):** +```bash +curl -s "https://api.obol.tech/v1/exp/exit/status/0x4d6e7f8a9b...?validatorPubkey=0xc4d3e2...&page=1&limit=10" +``` + +**Note:** This endpoint uses 1-indexed pagination (start at `page=1`, not `page=0`). + +--- + +## Cluster Definition (pre-DKG) — `GET /v1/definition/{configHash}` + +**curl:** +```bash +curl -s "https://api.obol.tech/v1/definition/0x1a2b3c4d5e..." 
+``` + +**Response shape:** +```json +{ + "config_hash": "0x1a2b3c4d5e...", + "name": "my-dv-cluster", + "version": "v1.8.0", + "num_validators": 4, + "threshold": 3, + "fork_version": "0x00000000", + "timestamp": "2024-03-14T08:00:00Z", + "operators": [ + { "address": "0xAbCd...1234", "config_signature": "", "enr_signature": "" }, + { "address": "0xEfGh...5678", "config_signature": "0x...", "enr_signature": "0x..." } + ] +} +``` + +**What to tell the user:** "DKG hasn't happened yet. An operator has signed when their +`config_signature` is non-empty. Operators with empty signatures still need to approve." + +--- + +## Operator Techne — `GET /v1/address/techne/{address}` + +**curl:** +```bash +curl -s "https://api.obol.tech/v1/address/techne/0xAbCd...1234" +``` + +**Response shape:** +```json +{ + "base": [], + "bronze": [], + "silver": [ + { "image_url": "https://nft-cdn.alchemy.com/...", "earned_at": "2025-04-21T19:28:21.949Z" } + ], + "gold": [] +} +``` + +**Notes:** +- Response is an object with arrays per tier: `base`, `bronze`, `silver`, `gold`. +- A non-empty array means the operator has earned that credential level. +- Tiers: `base` < `bronze` < `silver` < `gold`. Higher = sustained high-quality operation. +- To determine the highest earned tier, check arrays from gold down to base. 
+ +**Parsing:** +```python +for tier in ['gold', 'silver', 'bronze', 'base']: + if d.get(tier): + print(f'Level: {tier} (earned {d[tier][0].get("earned_at", "?")})') + break +else: + print('Level: none') +``` + +--- + +## Operator Badges — `GET /v1/address/badges/{address}` + +**curl:** +```bash +curl -s "https://api.obol.tech/v1/address/badges/0xAbCd...1234" +``` + +**Response shape:** +```json +{ + "badges": [ + { + "name": "Lido Mainnet", + "description": "Participation in the Lido x Obol Simple DVT module on mainnet.", + "image_url": "https://api.obol.tech/public/lido_mainnet.png", + "qualified": true, + "earned_at": "2024-06-15T12:00:00Z" + }, + { + "name": "Genesis Dappnode", + "description": "Holder of one of the original 60 Obol Genesis Dappnodes.", + "image_url": "https://api.obol.tech/public/genesis_dappnode.png", + "qualified": false, + "earned_at": null + } + ] +} +``` + +**Notes:** +- `qualified: true` with a non-null `earned_at` = badge earned. +- `qualified: false` = not earned (informational listing). +- Badge names include: "Lido Testnet", "Lido Mainnet", "Genesis Dappnode", etc. + +--- + +## Network Summary — `GET /v1/lock/network/summary/{network}` + +**curl:** +```bash +curl -s "https://api.obol.tech/v1/lock/network/summary/mainnet" +``` + +**Response shape:** +```json +{ + "eth_staked": 596790, + "total_clusters": 99, + "total_operators": 294 +} +``` + +**Notes:** +- `eth_staked` is total ETH staked across all DVT clusters on the network. +- Currently only `mainnet` returns reliable data; other networks may return errors. + +--- + +## Terms & Conditions — `GET /v1/termsAndConditions/{address}` + +**curl:** +```bash +curl -s "https://api.obol.tech/v1/termsAndConditions/0xAbCd...1234" +``` + +**Response shape:** +```json +{ + "isTermsAndConditionsSigned": true +} +``` + +--- + +## Example Conversations + +**"My cluster 0x4d6e... has terrible performance, what's wrong?"** +1. 
Call `GET /v1/effectiveness/0x4d6e...` — check which validators underperform (look at `sevenDay` scores) +2. Call `GET /v1/state/0x4d6e...` — check for non-`active_ongoing` statuses +3. Call `GET /v1/lock/0x4d6e...` — cross-reference `cluster_definition.operators` to identify likely offline operators + +**"How do I exit validator 0xb3a2c1...?"** +Exits are initiated in the Charon/validator client (write operation, not available here). +Use `GET /v1/exp/exit/status/0x...?validatorPubkey=0xb3a2c1...` to show current signing progress. + +**"Is 0xAbCd... a trustworthy operator?"** +1. `GET /v1/address/techne/0xAbCd...` — check tiers for highest earned credential +2. `GET /v1/address/badges/0xAbCd...` — protocol affiliations (look for `qualified: true`) +3. `GET /v1/lock/operator/0xAbCd...` — how many clusters they run (check `total_count`) +4. `GET /v1/termsAndConditions/0xAbCd...` — T&C compliance (`isTermsAndConditionsSigned`) diff --git a/skills/obol-k8s/SKILL.md b/skills/obol-k8s/SKILL.md new file mode 100644 index 0000000..436a6b9 --- /dev/null +++ b/skills/obol-k8s/SKILL.md @@ -0,0 +1,159 @@ +--- +name: obol-k8s +description: "Kubernetes cluster awareness via ServiceAccount API. Use when: checking pod status, reading logs, listing services, viewing events, diagnosing deployment issues, or inspecting resource health in own namespace. NOT for: cross-namespace operations, creating/modifying resources, network management (use obol-network), or DVT operations (use obol-dvt)." +metadata: { "openclaw": { "emoji": "☸️", "requires": { "bins": ["curl", "python3"] } } } +--- + +# Obol K8s + +Monitor your Kubernetes environment using the mounted ServiceAccount token. Read-only access to pods, logs, services, events, and more within your own namespace. + +## Paths + +All script paths in this document are relative to this skill's directory. 
When running from the pod, prefix with the skill's installed location: + +```bash +python3 /data/.openclaw/skills-injected/obol-k8s/scripts/kube.py pods +``` + +## When to Use + +- "What pods are running?" +- "Show me the logs for the openclaw pod" +- "Are there any warning events?" +- "What services are available?" +- "Why is a pod crashing?" +- "How many replicas are ready?" +- Diagnosing deployment issues (restarts, OOMKill, image pull errors) + +## When NOT to Use + +- Cross-namespace operations — SA is scoped to own namespace only +- Creating or modifying resources — read-only access +- Network deployment management — use the obol CLI +- Blockchain queries — use `obol-blockchain` +- DVT cluster monitoring — use `obol-dvt` + +## Scope + +**Read-only access to own namespace only.** The ServiceAccount has `get`, `list`, `watch` permissions on: + +| Resource | API Group | +|----------|-----------| +| Pods | core | +| Pods/log | core | +| Services | core | +| ConfigMaps | core | +| Events | core | +| PersistentVolumeClaims | core | +| Deployments | apps | +| ReplicaSets | apps | +| StatefulSets | apps | +| Jobs | batch | +| CronJobs | batch | + +**Cannot:** list namespaces, read other namespaces, create/update/delete resources. + +## Quick Start + +```bash +# List all pods with status +python3 scripts/kube.py pods + +# Get logs from a pod +python3 scripts/kube.py logs openclaw-7f8b9c6d5-x2k4j + +# Recent warning events +python3 scripts/kube.py events --type Warning + +# List services +python3 scripts/kube.py services + +# Deployment status +python3 scripts/kube.py deployments + +# List configmaps +python3 scripts/kube.py configmaps + +# Full details of a resource (outputs JSON) +python3 scripts/kube.py describe pod openclaw-7f8b9c6d5-x2k4j +``` + +## Direct curl + +The SA token and CA cert are mounted in the pod. 
You can query the Kubernetes API directly:

```bash
# Setup variables
TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
NS=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
API="https://kubernetes.default.svc"
CA="--cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt"

# List pods
curl -s $CA -H "Authorization: Bearer $TOKEN" \
  "$API/api/v1/namespaces/$NS/pods" | python3 -c "
import sys,json
pods = json.load(sys.stdin)['items']
for p in pods:
    name = p['metadata']['name']
    phase = p['status']['phase']
    restarts = sum(c.get('restartCount',0) for c in p['status'].get('containerStatuses',[]))
    print(f'{name} {phase} restarts={restarts}')
"

# Get pod logs (last 50 lines)
curl -s $CA -H "Authorization: Bearer $TOKEN" \
  "$API/api/v1/namespaces/$NS/pods/<pod-name>/log?tailLines=50"

# List events (warnings only)
curl -s $CA -H "Authorization: Bearer $TOKEN" \
  "$API/api/v1/namespaces/$NS/events?fieldSelector=type=Warning"
```

## Interpreting Pod Status

| Phase | Meaning |
|-------|---------|
| `Running` | Pod is executing normally |
| `Pending` | Waiting to be scheduled (check events for reason) |
| `Succeeded` | All containers exited successfully (common for Jobs) |
| `Failed` | All containers terminated, at least one failed |
| `Unknown` | Pod state cannot be determined |

### Container States

| State | Common Cause |
|-------|-------------|
| `Waiting: CrashLoopBackOff` | Container crashes repeatedly. Check logs. |
| `Waiting: ImagePullBackOff` | Cannot pull container image. Check image name/tag. |
| `Waiting: ContainerCreating` | Pulling image or mounting volumes. |
| `Terminated: OOMKilled` | Out of memory. Pod needs higher memory limits. |
| `Terminated: Error` | Container exited with non-zero code. Check logs. |

## Troubleshooting Patterns

### Pod Won't Start

1. `python3 scripts/kube.py pods` — check status
2. 
`python3 scripts/kube.py events --type Warning` — look for scheduling or image errors
3. `python3 scripts/kube.py describe pod <pod>` — check conditions and container state

### Pod Keeps Restarting

1. `python3 scripts/kube.py pods` — check restart count
2. `python3 scripts/kube.py logs <pod>` — check last log output before crash
3. Look for OOMKilled in container status — if so, memory limit too low

### Service Not Reachable

1. `python3 scripts/kube.py services` — verify service exists and ports
2. `python3 scripts/kube.py pods` — verify backing pods are Running
3. `python3 scripts/kube.py describe service <name>` — check endpoints

## Constraints

- **Read-only** — cannot create, modify, or delete any resources
- **Own namespace only** — cannot see other namespaces or cluster-level resources
- **No kubectl** — uses curl + SA token (kubectl binary not installed in pod)
- **Formatted output** — list commands (`pods`, `services`, etc.) output human-readable text; `describe` outputs indented JSON
diff --git a/skills/obol-k8s/scripts/kube.py b/skills/obol-k8s/scripts/kube.py
new file mode 100644
index 0000000..08139d2
--- /dev/null
+++ b/skills/obol-k8s/scripts/kube.py
@@ -0,0 +1,297 @@
#!/usr/bin/env python3
"""Kubernetes API helper for Obol Stack OpenClaw pods.

Uses the mounted ServiceAccount token to query the Kubernetes API.
No kubectl required — pure HTTP via urllib. 
+ +Usage: + python3 kube.py [args] + +Commands: + pods List pods with status + logs [--tail N] Get pod logs + events [--type Warning] List events + services List services + deployments List deployments + configmaps List configmaps + describe Get full resource detail +""" + +import json +import os +import ssl +import sys +import urllib.request +from datetime import datetime, timezone + +# ServiceAccount paths +SA_DIR = "/var/run/secrets/kubernetes.io/serviceaccount" +TOKEN_PATH = os.path.join(SA_DIR, "token") +NS_PATH = os.path.join(SA_DIR, "namespace") +CA_PATH = os.path.join(SA_DIR, "ca.crt") +API_SERVER = "https://kubernetes.default.svc" + + +def load_sa(): + """Load ServiceAccount token and namespace.""" + try: + with open(TOKEN_PATH) as f: + token = f.read().strip() + with open(NS_PATH) as f: + namespace = f.read().strip() + return token, namespace + except FileNotFoundError: + print("Error: ServiceAccount not mounted. Are you running inside a Kubernetes pod?", file=sys.stderr) + sys.exit(1) + + +def make_ssl_context(): + """Create SSL context with the cluster CA.""" + ctx = ssl.create_default_context() + if os.path.exists(CA_PATH): + ctx.load_verify_locations(CA_PATH) + return ctx + + +def api_get(path, token, ssl_ctx): + """GET request to the Kubernetes API.""" + url = f"{API_SERVER}{path}" + req = urllib.request.Request(url, headers={"Authorization": f"Bearer {token}"}) + try: + with urllib.request.urlopen(req, context=ssl_ctx, timeout=15) as resp: + return json.loads(resp.read()) + except urllib.error.HTTPError as e: + body = e.read().decode() if e.fp else "" + print(f"API error {e.code}: {body[:200]}", file=sys.stderr) + sys.exit(1) + + +def age(timestamp_str): + """Convert ISO timestamp to human-readable age.""" + if not timestamp_str: + return "?" 
+ try: + ts = datetime.fromisoformat(timestamp_str.replace("Z", "+00:00")) + delta = datetime.now(timezone.utc) - ts + secs = int(delta.total_seconds()) + if secs < 60: + return f"{secs}s" + if secs < 3600: + return f"{secs // 60}m" + if secs < 86400: + return f"{secs // 3600}h" + return f"{secs // 86400}d" + except (ValueError, TypeError): + return "?" + + +def cmd_pods(ns, token, ssl_ctx): + """List pods with status, restarts, and age.""" + data = api_get(f"/api/v1/namespaces/{ns}/pods", token, ssl_ctx) + items = data.get("items", []) + if not items: + print("No pods found.") + return + + print(f"{'NAME':<50} {'STATUS':<20} {'RESTARTS':<10} {'AGE':<8}") + print("-" * 90) + for pod in items: + name = pod["metadata"]["name"] + phase = pod["status"].get("phase", "Unknown") + created = pod["metadata"].get("creationTimestamp", "") + + restarts = 0 + container_statuses = pod["status"].get("containerStatuses", []) + for cs in container_statuses: + restarts += cs.get("restartCount", 0) + # Show waiting reason if not Running + state = cs.get("state", {}) + if "waiting" in state: + reason = state["waiting"].get("reason", "") + if reason: + phase = reason + + print(f"{name:<50} {phase:<20} {restarts:<10} {age(created):<8}") + + +def cmd_logs(ns, token, ssl_ctx, pod_name, tail=100): + """Get pod logs.""" + path = f"/api/v1/namespaces/{ns}/pods/{pod_name}/log?tailLines={tail}" + url = f"{API_SERVER}{path}" + req = urllib.request.Request(url, headers={"Authorization": f"Bearer {token}"}) + try: + with urllib.request.urlopen(req, context=make_ssl_context(), timeout=30) as resp: + print(resp.read().decode(errors="replace")) + except urllib.error.HTTPError as e: + print(f"Error getting logs: {e.code}", file=sys.stderr) + sys.exit(1) + + +def cmd_events(ns, token, ssl_ctx, event_type=None): + """List events, optionally filtered by type.""" + path = f"/api/v1/namespaces/{ns}/events" + if event_type: + path += f"?fieldSelector=type={event_type}" + + data = api_get(path, token, 
ssl_ctx) + items = data.get("items", []) + if not items: + print("No events found.") + return + + # Sort by last timestamp + items.sort(key=lambda e: e.get("lastTimestamp", "") or e.get("metadata", {}).get("creationTimestamp", "")) + + print(f"{'AGE':<8} {'TYPE':<10} {'REASON':<25} {'OBJECT':<35} {'MESSAGE'}") + print("-" * 120) + for ev in items[-30:]: # Last 30 events + ts = ev.get("lastTimestamp") or ev.get("metadata", {}).get("creationTimestamp", "") + etype = ev.get("type", "?") + reason = ev.get("reason", "?") + obj_ref = ev.get("involvedObject", {}) + obj = f"{obj_ref.get('kind', '?')}/{obj_ref.get('name', '?')}" + msg = ev.get("message", "")[:80] + print(f"{age(ts):<8} {etype:<10} {reason:<25} {obj:<35} {msg}") + + +def cmd_services(ns, token, ssl_ctx): + """List services with type and ports.""" + data = api_get(f"/api/v1/namespaces/{ns}/services", token, ssl_ctx) + items = data.get("items", []) + if not items: + print("No services found.") + return + + print(f"{'NAME':<40} {'TYPE':<15} {'CLUSTER-IP':<18} {'PORTS'}") + print("-" * 100) + for svc in items: + name = svc["metadata"]["name"] + stype = svc["spec"].get("type", "ClusterIP") + cluster_ip = svc["spec"].get("clusterIP", "None") + ports = [] + for p in svc["spec"].get("ports", []): + port_str = f"{p.get('port', '?')}/{p.get('protocol', 'TCP')}" + if p.get("targetPort"): + port_str += f"->{p['targetPort']}" + ports.append(port_str) + print(f"{name:<40} {stype:<15} {cluster_ip:<18} {', '.join(ports)}") + + +def cmd_deployments(ns, token, ssl_ctx): + """List deployments with ready/desired counts.""" + data = api_get(f"/apis/apps/v1/namespaces/{ns}/deployments", token, ssl_ctx) + items = data.get("items", []) + if not items: + print("No deployments found.") + return + + print(f"{'NAME':<40} {'READY':<12} {'UP-TO-DATE':<12} {'AVAILABLE':<12} {'AGE':<8}") + print("-" * 90) + for dep in items: + name = dep["metadata"]["name"] + created = dep["metadata"].get("creationTimestamp", "") + status = 
dep.get("status", {}) + desired = dep["spec"].get("replicas", 0) + ready = status.get("readyReplicas", 0) + updated = status.get("updatedReplicas", 0) + available = status.get("availableReplicas", 0) + print(f"{name:<40} {ready}/{desired:<10} {updated:<12} {available:<12} {age(created):<8}") + + +def cmd_configmaps(ns, token, ssl_ctx): + """List configmaps.""" + data = api_get(f"/api/v1/namespaces/{ns}/configmaps", token, ssl_ctx) + items = data.get("items", []) + if not items: + print("No configmaps found.") + return + + print(f"{'NAME':<50} {'DATA KEYS':<6} {'AGE':<8}") + print("-" * 70) + for cm in items: + name = cm["metadata"]["name"] + created = cm["metadata"].get("creationTimestamp", "") + data_keys = len(cm.get("data", {})) + print(f"{name:<50} {data_keys:<6} {age(created):<8}") + + +def cmd_describe(ns, token, ssl_ctx, resource_type, name): + """Get full resource detail as JSON.""" + type_map = { + "pod": f"/api/v1/namespaces/{ns}/pods/{name}", + "service": f"/api/v1/namespaces/{ns}/services/{name}", + "deployment": f"/apis/apps/v1/namespaces/{ns}/deployments/{name}", + "configmap": f"/api/v1/namespaces/{ns}/configmaps/{name}", + "event": f"/api/v1/namespaces/{ns}/events/{name}", + "pvc": f"/api/v1/namespaces/{ns}/persistentvolumeclaims/{name}", + "statefulset": f"/apis/apps/v1/namespaces/{ns}/statefulsets/{name}", + "job": f"/apis/batch/v1/namespaces/{ns}/jobs/{name}", + "cronjob": f"/apis/batch/v1/namespaces/{ns}/cronjobs/{name}", + "replicaset": f"/apis/apps/v1/namespaces/{ns}/replicasets/{name}", + } + + path = type_map.get(resource_type) + if not path: + print(f"Unknown resource type: {resource_type}", file=sys.stderr) + print(f"Supported: {', '.join(sorted(type_map.keys()))}", file=sys.stderr) + sys.exit(1) + + data = api_get(path, token, ssl_ctx) + print(json.dumps(data, indent=2)) + + +def main(): + if len(sys.argv) < 2: + print("Usage: python3 kube.py [args]") + print("\nCommands:") + print(" pods List pods with status") + print(" logs [--tail N] 
Get pod logs (default 100 lines)") + print(" events [--type Warning] List events") + print(" services List services") + print(" deployments List deployments") + print(" configmaps List configmaps") + print(" describe Get full resource detail") + sys.exit(1) + + token, ns = load_sa() + ssl_ctx = make_ssl_context() + cmd = sys.argv[1] + + if cmd == "pods": + cmd_pods(ns, token, ssl_ctx) + elif cmd == "logs": + if len(sys.argv) < 3: + print("Usage: python3 kube.py logs [--tail N]", file=sys.stderr) + sys.exit(1) + pod_name = sys.argv[2] + tail = 100 + if "--tail" in sys.argv: + idx = sys.argv.index("--tail") + if idx + 1 < len(sys.argv): + tail = int(sys.argv[idx + 1]) + cmd_logs(ns, token, ssl_ctx, pod_name, tail) + elif cmd == "events": + event_type = None + if "--type" in sys.argv: + idx = sys.argv.index("--type") + if idx + 1 < len(sys.argv): + event_type = sys.argv[idx + 1] + cmd_events(ns, token, ssl_ctx, event_type) + elif cmd == "services": + cmd_services(ns, token, ssl_ctx) + elif cmd == "deployments": + cmd_deployments(ns, token, ssl_ctx) + elif cmd == "configmaps": + cmd_configmaps(ns, token, ssl_ctx) + elif cmd == "describe": + if len(sys.argv) < 4: + print("Usage: python3 kube.py describe ", file=sys.stderr) + sys.exit(1) + cmd_describe(ns, token, ssl_ctx, sys.argv[2], sys.argv[3]) + else: + print(f"Unknown command: {cmd}", file=sys.stderr) + sys.exit(1) + + +if __name__ == "__main__": + main()