diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..26063b1 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,53 @@ +# VCS / CI +.git +.github/ + +# Editor / IDE +.claude/ +.vscode/ +.idea/ + +# Build artifacts +bin/ +mpcd-linux-* + +# Not needed for build (ui/ IS needed -- Stage 1 builds it) +contracts/ +dashboard/ +data/ +deployments/ +docs/ +e2e/ +examples/ +identity/ +images/ +k8s/ +minio/ +scripts/ + +# ui/ build artifacts (source needed, artifacts not) +ui/node_modules +ui/dist + +# Test / coverage +testdata/ +*_test.go +coverage/ + +# Vendored deps (go mod download in Dockerfile) +vendor/ + +# Go workspace (multi-module dev only) +go.work +go.work.sum + +# Config / secrets +.env* +compose.yml +cloudbuild.yaml +config.yaml +Makefile + +# Docs +*.md +LICENSE diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..a61add9 --- /dev/null +++ b/.env.example @@ -0,0 +1,8 @@ +# PostgreSQL +POSTGRES_PASSWORD=your-secure-password-here + +# JWT Secret (generate with: openssl rand -hex 32) +JWT_SECRET=your-jwt-secret-here + +# API URL for dashboard +NEXT_PUBLIC_API_URL=http://localhost:8081/api/v1 diff --git a/.gcloudignore b/.gcloudignore new file mode 100644 index 0000000..d143cdc --- /dev/null +++ b/.gcloudignore @@ -0,0 +1,2 @@ +#!include:.dockerignore +.github/ diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml new file mode 100644 index 0000000..0355e51 --- /dev/null +++ b/.github/workflows/benchmarks.yml @@ -0,0 +1,180 @@ +name: Benchmarks + +on: + push: + branches: [master] + pull_request: + branches: [master] + workflow_dispatch: + inputs: + benchmark_time: + description: 'Benchmark time per test (e.g., 10s, 1m)' + required: false + default: '10s' + +jobs: + unit-benchmarks: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: "1.25.5" + + - name: Cache Go modules + uses: 
actions/cache@v3 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + - name: Install dependencies + run: go mod download + + - name: Run unit benchmarks + run: | + go test -bench=. -benchmem -benchtime=${{ github.event.inputs.benchmark_time || '10s' }} ./pkg/... | tee benchmark_results.txt + + - name: Upload benchmark results + uses: actions/upload-artifact@v3 + with: + name: unit-benchmark-results + path: benchmark_results.txt + + e2e-benchmarks: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: "1.25.5" + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Cache Go modules + uses: actions/cache@v3 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + - name: Build binaries + run: | + go build -o lux-mpc ./cmd/lux-mpc + go build -o lux-mpc-cli ./cmd/lux-mpc-cli + chmod +x lux-mpc lux-mpc-cli + + - name: Install binaries + run: | + sudo mv lux-mpc /usr/local/bin/ + sudo mv lux-mpc-cli /usr/local/bin/ + + - name: Install E2E dependencies + run: | + cd e2e + go mod download + + - name: Setup test environment + run: | + cd e2e + docker compose -f docker-compose.test.yaml up -d + sleep 10 + docker compose -f docker-compose.test.yaml ps + + - name: Run E2E benchmarks + run: | + cd e2e + go test -bench=. 
-benchmem -benchtime=${{ github.event.inputs.benchmark_time || '10s' }} -timeout=30m | tee e2e_benchmark_results.txt + + - name: Upload E2E benchmark results + uses: actions/upload-artifact@v3 + with: + name: e2e-benchmark-results + path: e2e/e2e_benchmark_results.txt + + - name: Cleanup + if: always() + run: | + cd e2e + docker compose -f docker-compose.test.yaml down -v + ./cleanup_test_env.sh || true + + benchmark-comparison: + needs: [unit-benchmarks, e2e-benchmarks] + runs-on: ubuntu-latest + if: github.event_name == 'pull_request' + + steps: + - name: Download current benchmark results + uses: actions/download-artifact@v3 + with: + name: unit-benchmark-results + path: current/ + + - name: Download E2E benchmark results + uses: actions/download-artifact@v3 + with: + name: e2e-benchmark-results + path: current/ + + - name: Comment PR with results + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + + let comment = '## ๐Ÿ“Š Benchmark Results\n\n'; + + // Read unit benchmark results + if (fs.existsSync('current/benchmark_results.txt')) { + const unitResults = fs.readFileSync('current/benchmark_results.txt', 'utf8'); + comment += '### Unit Benchmarks\n```\n' + unitResults + '\n```\n\n'; + } + + // Read E2E benchmark results + if (fs.existsSync('current/e2e_benchmark_results.txt')) { + const e2eResults = fs.readFileSync('current/e2e_benchmark_results.txt', 'utf8'); + comment += '### E2E Benchmarks\n```\n' + e2eResults + '\n```\n'; + } + + // Find and update or create comment + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + }); + + const botComment = comments.find(comment => + comment.user.type === 'Bot' && comment.body.includes('๐Ÿ“Š Benchmark Results') + ); + + if (botComment) { + await github.rest.issues.updateComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: botComment.id, + body: comment 
+ }); + } else { + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + body: comment + }); + } \ No newline at end of file diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000..36f4615 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,62 @@ +name: Build + +on: + push: + branches: [main, test, dev] + workflow_dispatch: + +permissions: + contents: read + packages: write + +jobs: + docker: + strategy: + fail-fast: false + matrix: + include: + - arch: amd64 + runner: luxfi-build-amd64 + - arch: arm64 + runner: luxfi-build-arm64 + runs-on: ${{ matrix.runner }} + steps: + - uses: actions/checkout@v4 + - uses: docker/setup-buildx-action@v3 + - uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - uses: docker/build-push-action@v6 + with: + context: . + push: true + tags: ghcr.io/${{ github.repository }}:${{ github.ref_name }}-${{ matrix.arch }} + cache-from: type=gha,scope=${{ matrix.arch }} + cache-to: type=gha,scope=${{ matrix.arch }},mode=max + + manifest: + needs: [docker] + if: always() && contains(needs.docker.result, 'success') + runs-on: luxfi-build-amd64 + steps: + - uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - uses: docker/setup-buildx-action@v3 + - run: | + IMAGE="ghcr.io/${{ github.repository }}" + BRANCH="${{ github.ref_name }}" + SOURCES="" + for ARCH in amd64 arm64; do + if docker buildx imagetools inspect "${IMAGE}:${BRANCH}-${ARCH}" >/dev/null 2>&1; then + SOURCES="${SOURCES} ${IMAGE}:${BRANCH}-${ARCH}" + fi + done + [ -z "$SOURCES" ] && exit 1 + for TAG in "${BRANCH}" latest; do + docker buildx imagetools create -t "${IMAGE}:${TAG}" ${SOURCES} + done diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 
0000000..5cd2ad3 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,328 @@ +name: CI Pipeline + +on: + push: + branches: ["*"] + pull_request: + branches: ["*"] + +jobs: + test: + runs-on: ubuntu-latest + env: + GOEXPERIMENT: runtimesecret + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: "1.26" + + - name: Cache Go modules + uses: actions/cache@v4 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + - name: Download dependencies + run: GOWORK=off go mod download + + - name: Run unit tests + run: GOWORK=off CGO_ENABLED=1 go test -v -race -coverprofile=coverage.out ./... + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v4 + with: + file: ./coverage.out + flags: unittests + name: codecov-umbrella + + lint: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: "1.26" + + - name: Run golangci-lint + uses: golangci/golangci-lint-action@v6 + env: + GOWORK: "off" + CGO_ENABLED: "0" + with: + version: latest + install-mode: goinstall + args: --timeout=5m + skip-cache: true + + # Security vulnerability scanning + security-scan: + runs-on: ubuntu-latest + name: Security Vulnerability Scan + permissions: + actions: read + contents: read + security-events: write + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: "1.26" + + - name: Cache Go modules + uses: actions/cache@v4 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + - name: Vendor dependencies + run: GOWORK=off go mod vendor + + - name: Install govulncheck + run: go install golang.org/x/vuln/cmd/govulncheck@latest + + - 
name: Run govulncheck and generate SARIF + run: | + echo "Current directory: $(pwd)" + echo "Files in current directory:" + ls -la + govulncheck -format=sarif ./... > govulncheck-results.sarif + + govulncheck -json ./... > vuln.json + count=$(jq -s '[.[] | select(.finding != null and .finding.trace != null)] | length' vuln.json || echo 0) + echo "Found $count vulnerabilities" + + if [ "$count" -gt 0 ]; then + echo "⚠️ Vulnerabilities found by govulncheck (see Security tab for details)" + else + echo "✅ No vulnerabilities found by govulncheck" + fi + continue-on-error: true + + - name: Upload govulncheck results to GitHub Security tab + uses: github/codeql-action/upload-sarif@v3 + if: always() + continue-on-error: true + with: + sarif_file: govulncheck-results.sarif + category: govulncheck + + - name: Install gosec + run: go install github.com/securego/gosec/v2/cmd/gosec@latest + + - name: Run gosec security scanner + run: | + gosec -fmt sarif -out gosec-results.sarif -exclude G304 ./... 
+ continue-on-error: true + + - name: Upload gosec results to GitHub Security tab + uses: github/codeql-action/upload-sarif@v3 + if: always() + continue-on-error: true + with: + sarif_file: gosec-results.sarif + category: gosec + + # CodeQL Analysis + codeql-analysis: + name: CodeQL Analysis + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: ["go"] + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: "1.26" + + - name: Cache Go modules + uses: actions/cache@v4 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + - name: Vendor dependencies + run: GOWORK=off go mod vendor + + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + queries: +security-and-quality + + - name: Build for CodeQL + run: | + CGO_ENABLED=0 go build -v ./cmd/mpcd + CGO_ENABLED=0 go build -v ./cmd/lux-mpc-cli + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 + with: + category: "/language:${{matrix.language}}" + + # SBOM Generation + sbom: + runs-on: ubuntu-latest + name: Generate SBOM + permissions: + actions: read + contents: read + security-events: write + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: "1.26" + + - name: Cache Go modules + uses: actions/cache@v4 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + - name: Vendor dependencies + run: GOWORK=off go mod vendor + + - name: Build binaries + run: | + CGO_ENABLED=0 go build -o mpcd ./cmd/mpcd + CGO_ENABLED=0 go build -o lux-mpc-cli ./cmd/lux-mpc-cli + + - name: Generate SBOM with 
anchore/sbom-action (SPDX-JSON) + uses: anchore/sbom-action@v0 + with: + artifact-name: sbom-spdx.json + output-file: sbom.spdx.json + format: spdx-json + + - name: Generate SBOM with anchore/sbom-action (CycloneDX) + uses: anchore/sbom-action@v0 + with: + artifact-name: sbom-cyclonedx.json + output-file: sbom.cyclonedx.json + format: cyclonedx-json + upload-artifact: false + + - name: Generate SBOM with anchore/sbom-action (Syft JSON) + uses: anchore/sbom-action@v0 + with: + artifact-name: sbom-syft.json + output-file: sbom.syft.json + format: syft-json + upload-artifact: false + + - name: Upload all SBOM artifacts + uses: actions/upload-artifact@v4 + with: + name: sbom-files + path: | + sbom.spdx.json + sbom.cyclonedx.json + sbom.syft.json + retention-days: 30 + + - name: Install Grype + run: | + curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /usr/local/bin + + - name: Scan SBOM with Grype + run: | + grype sbom.spdx.json -o sarif --file grype-results.sarif + continue-on-error: true + + - name: Upload Grype results to GitHub Security tab + uses: github/codeql-action/upload-sarif@v3 + if: always() + continue-on-error: true + with: + sarif_file: grype-results.sarif + category: grype + + - name: Display SBOM summary + run: | + echo "๐Ÿ“ฆ SBOM Generation Summary" + echo "=========================" + echo "Generated SBOM files:" + ls -la sbom.* + echo "" + echo "SBOM package count:" + echo "SPDX: $(jq '.packages | length' sbom.spdx.json)" + echo "CycloneDX: $(jq '.components | length' sbom.cyclonedx.json)" + echo "Syft: $(jq '.artifacts | length' sbom.syft.json)" + + build: + runs-on: lux-build-linux-amd64 + needs: [test, lint, codeql-analysis, sbom] + env: + GOEXPERIMENT: runtimesecret + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: "1.26" + + - name: Cache Go modules + uses: actions/cache@v4 + with: + path: | + ~/.cache/go-build + 
~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + - name: Vendor dependencies + run: GOWORK=off go mod vendor + + - name: Build mpcd + run: CGO_ENABLED=0 GOWORK=off go build -mod=vendor -v ./cmd/mpcd + + - name: Build lux-mpc-cli + run: CGO_ENABLED=0 GOWORK=off go build -mod=vendor -v ./cmd/lux-mpc-cli diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 0000000..a91ed50 --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,71 @@ +name: Docker + +on: + workflow_dispatch: + push: + tags: ['v*'] + +permissions: + contents: read + packages: write + +jobs: + build: + strategy: + matrix: + include: + - arch: amd64 + runner: ubuntu-latest + - arch: arm64 + runner: ubuntu-24.04-arm + runs-on: ${{ matrix.runner }} + steps: + - uses: actions/checkout@v4 + - uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Build and push + run: | + TAG=${GITHUB_REF#refs/tags/} + IMG=ghcr.io/luxfi/mpc:${TAG}-${{ matrix.arch }} + docker build \ + --build-arg GITHUB_TOKEN=${{ secrets.UNIVERSE_PAT }} \ + -t $IMG . 
+ docker push $IMG + + manifest: + needs: build + runs-on: ubuntu-latest + steps: + - uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Create manifest + run: | + TAG=${GITHUB_REF#refs/tags/} + IMG=ghcr.io/luxfi/mpc + docker manifest create $IMG:$TAG \ + $IMG:${TAG}-amd64 \ + $IMG:${TAG}-arm64 + docker manifest push $IMG:$TAG + + notify: + needs: manifest + if: startsWith(github.ref, 'refs/tags/v') + runs-on: ubuntu-latest + steps: + - uses: peter-evans/repository-dispatch@v3 + with: + token: ${{ secrets.UNIVERSE_PAT }} + repository: luxfi/universe + event-type: image-published + client-payload: | + { + "service": "mpc", + "image": "ghcr.io/luxfi/mpc:${{ github.ref_name }}", + "tag": "${{ github.ref_name }}" + } diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 0000000..68b0784 --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,64 @@ +name: Deploy Docs to GitHub Pages + +on: + push: + branches: + - main + paths: + - 'docs/**' + workflow_dispatch: + +permissions: + contents: read + pages: write + id-token: write + +concurrency: + group: pages + cancel-in-progress: false + +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: 10 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '22' + cache: 'pnpm' + cache-dependency-path: docs/pnpm-lock.yaml + + - name: Install dependencies + working-directory: docs + run: pnpm install --frozen-lockfile + + - name: Build docs + working-directory: docs + run: pnpm build + + - name: Setup Pages + uses: actions/configure-pages@v5 + + - name: Upload artifact + uses: actions/upload-pages-artifact@v3 + with: + path: docs/out + + deploy: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + needs: build 
+ steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4 diff --git a/.github/workflows/e2e-tests.yml b/.github/workflows/e2e-tests.yml new file mode 100644 index 0000000..67e6a91 --- /dev/null +++ b/.github/workflows/e2e-tests.yml @@ -0,0 +1,334 @@ +name: E2E Integration Tests + +on: + push: + branches: [master] + pull_request: + branches: [master] + +jobs: + # Build job that creates the binaries needed by all E2E test jobs + build: + runs-on: ubuntu-latest + outputs: + cache-key: ${{ steps.cache-key.outputs.key }} + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: "1.25.5" + + - name: Tidy Go modules + run: | + go mod tidy + + - name: Generate cache key + id: cache-key + run: echo "key=${{ runner.os }}-binaries-${{ hashFiles('**/go.sum', '**/*.go') }}" >> $GITHUB_OUTPUT + + - name: Cache binaries + id: cache-binaries + uses: actions/cache@v3 + with: + path: | + ./lux-mpc + ./lux-mpc-cli + key: ${{ steps.cache-key.outputs.key }} + + - name: Cache Go modules + if: steps.cache-binaries.outputs.cache-hit != 'true' + uses: actions/cache@v3 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + - name: Install dependencies + if: steps.cache-binaries.outputs.cache-hit != 'true' + run: | + go mod download + cd e2e && go mod download + + - name: Build binaries + if: steps.cache-binaries.outputs.cache-hit != 'true' + run: | + go build -o lux-mpc ./cmd/lux-mpc + go build -o lux-mpc-cli ./cmd/lux-mpc-cli + chmod +x lux-mpc lux-mpc-cli + + # Key Generation E2E Tests + e2e-keygen: + runs-on: ubuntu-latest + needs: build + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: "1.25.5" + + - name: Tidy Go modules + run: | + go mod tidy + + - name: Set up Docker Buildx + uses: 
docker/setup-buildx-action@v3 + + - name: Verify Docker Compose + run: | + docker --version + docker compose version + + - name: Cache Go modules + uses: actions/cache@v3 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + - name: Restore binaries + uses: actions/cache@v3 + with: + path: | + ./lux-mpc + ./lux-mpc-cli + key: ${{ needs.build.outputs.cache-key }} + + - name: Install binaries + run: | + sudo mv lux-mpc /usr/local/bin/ + sudo mv lux-mpc-cli /usr/local/bin/ + + - name: Verify binaries are available + run: | + which lux-mpc + which lux-mpc-cli + lux-mpc --version || echo "lux-mpc binary ready" + lux-mpc-cli --version || echo "lux-mpc-cli binary ready" + + - name: Install E2E dependencies + run: | + cd e2e && go mod tidy && go mod download + + - name: Run Key Generation E2E tests + run: | + cd e2e + go test -v -timeout=1200s -run TestKeyGeneration + env: + DOCKER_BUILDKIT: 1 + + - name: Cleanup Docker containers + if: always() + run: | + cd e2e + docker compose -f docker-compose.test.yaml down -v || true + docker system prune -f || true + + - name: Upload keygen test logs + if: failure() + uses: actions/upload-artifact@v4 + with: + name: e2e-keygen-test-logs + path: e2e/logs/ + retention-days: 7 + + # Signing E2E Tests + e2e-signing: + runs-on: ubuntu-latest + needs: build + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: "1.25.5" + + - name: Tidy Go modules + run: | + go mod tidy + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Verify Docker Compose + run: | + docker --version + docker compose version + + - name: Cache Go modules + uses: actions/cache@v3 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + - name: Restore binaries + 
uses: actions/cache@v3 + with: + path: | + ./lux-mpc + ./lux-mpc-cli + key: ${{ needs.build.outputs.cache-key }} + + - name: Install binaries + run: | + sudo mv lux-mpc /usr/local/bin/ + sudo mv lux-mpc-cli /usr/local/bin/ + + - name: Verify binaries are available + run: | + which lux-mpc + which lux-mpc-cli + lux-mpc --version || echo "lux-mpc binary ready" + lux-mpc-cli --version || echo "lux-mpc-cli binary ready" + + - name: Install E2E dependencies + run: | + cd e2e && go mod tidy && go mod download + + - name: Run Signing E2E tests + run: | + cd e2e + go test -v -timeout=1200s -run TestSigning + env: + DOCKER_BUILDKIT: 1 + + - name: Cleanup Docker containers + if: always() + run: | + cd e2e + docker compose -f docker-compose.test.yaml down -v || true + docker system prune -f || true + + - name: Upload signing test logs + if: failure() + uses: actions/upload-artifact@v4 + with: + name: e2e-signing-test-logs + path: e2e/logs/ + retention-days: 7 + + # Resharing E2E Tests + e2e-resharing: + runs-on: ubuntu-latest + needs: build + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: "1.25.5" + + - name: Tidy Go modules + run: | + go mod tidy + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Verify Docker Compose + run: | + docker --version + docker compose version + + - name: Cache Go modules + uses: actions/cache@v3 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + - name: Restore binaries + uses: actions/cache@v3 + with: + path: | + ./lux-mpc + ./lux-mpc-cli + key: ${{ needs.build.outputs.cache-key }} + + - name: Install binaries + run: | + sudo mv lux-mpc /usr/local/bin/ + sudo mv lux-mpc-cli /usr/local/bin/ + + - name: Verify binaries are available + run: | + which lux-mpc + which lux-mpc-cli + lux-mpc --version || echo "lux-mpc binary ready" + 
lux-mpc-cli --version || echo "lux-mpc-cli binary ready" + + - name: Install E2E dependencies + run: | + cd e2e && go mod tidy && go mod download + + - name: Run Resharing E2E tests + run: | + cd e2e + go test -v -timeout=1200s -run TestResharing + env: + DOCKER_BUILDKIT: 1 + + - name: Cleanup Docker containers + if: always() + run: | + cd e2e + docker compose -f docker-compose.test.yaml down -v || true + docker system prune -f || true + + - name: Upload resharing test logs + if: failure() + uses: actions/upload-artifact@v4 + with: + name: e2e-resharing-test-logs + path: e2e/logs/ + retention-days: 7 + + # Summary job that depends on all E2E tests + e2e-summary: + runs-on: ubuntu-latest + needs: [e2e-keygen, e2e-signing, e2e-resharing] + if: always() + + steps: + - name: Check E2E test results + run: | + echo "E2E Test Results Summary:" + echo "=========================" + echo "Key Generation Tests: ${{ needs.e2e-keygen.result }}" + echo "Signing Tests: ${{ needs.e2e-signing.result }}" + echo "Resharing Tests: ${{ needs.e2e-resharing.result }}" + echo "" + + # Check if any tests failed + if [[ "${{ needs.e2e-keygen.result }}" != "success" || "${{ needs.e2e-signing.result }}" != "success" || "${{ needs.e2e-resharing.result }}" != "success" ]]; then + echo "โŒ One or more E2E tests failed" + exit 1 + else + echo "โœ… All E2E tests passed successfully" + fi diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..cdd04d1 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,46 @@ +name: Release MPC + +on: + push: + tags: ['v*'] + +permissions: + contents: write + +jobs: + build: + strategy: + matrix: + arch: [amd64, arm64] + runs-on: ${{ matrix.arch == 'arm64' && 'hanzo-build-linux-arm64' || 'hanzo-build-linux-amd64' }} + env: + GONOSUMCHECK: '*' + GONOSUMDB: '*' + GOPRIVATE: 'github.com/luxfi/*' + GOPROXY: direct + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: 
'1.26' + - name: Build + run: | + GOWORK=off CGO_ENABLED=0 go build -trimpath -ldflags="-s -w" -o mpcd ./cmd/mpcd/ + - name: Upload + uses: actions/upload-artifact@v4 + with: + name: mpcd-linux-${{ matrix.arch }} + path: mpcd + + release: + needs: build + runs-on: hanzo-build-linux-amd64 + permissions: + contents: write + steps: + - uses: actions/download-artifact@v4 + with: + path: dist/ + - uses: softprops/action-gh-release@v2 + with: + files: dist/**/* diff --git a/.gitignore b/.gitignore index 751dea4..c95bd86 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,66 @@ -db/ -tmp/ -bin/ +/db/ +/tmp/ +/bin/ identity/ event_initiator.identity.json event_initiator.key event_initiator.key.age +coverage.out +coverage.html + +# E2E test artifacts +e2e/test_db/ +e2e/test_node*/ +e2e/test_event_initiator.* +e2e/coverage.out +e2e/coverage.html +e2e/logs/ +# Generated config file (template is tracked) +e2e/config.test.yaml +logs/ + +# Built binaries +/lux-mpc +/lux-mpc-bridge +/lux-mpc-cli +/mpcd +*-linux-amd64 +*-linux-arm64 +*-darwin-amd64 +*-darwin-arm64 + +# Docker build artifacts +.docker-build/ + +# Dashboard build (legacy Next.js) +dashboard/node_modules/ +dashboard/.next/ +dashboard/package-lock.json + +# Embedded UI build +ui/node_modules/ +ui/dist/ +ui/pnpm-lock.yaml + +# Environment +.env +.env.local + + + +AGENTS.md +CLAUDE.md +GEMINI.md +GROK.md +QWEN.md + +# Local prebuilt binaries +mpcd-linux* + +# Dashboard build artifacts +dashboard/tsconfig.tsbuildinfo + +# Vendor (generated by CI via go mod vendor) +vendor/ +dashboard/playwright-report/ +dashboard/test-results/ diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000..7d636c7 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,22 @@ +run: + timeout: 5m + +linters-settings: + goimports: + local-prefixes: github.com/luxfi/mpc + +linters: + disable-all: true + enable: + - goimports + - govet + - ineffassign + - misspell + - unconvert + +issues: + exclude-dirs: + - vendor + - .git + 
max-issues-per-linter: 0 + max-same-issues: 0 diff --git a/.goreleaser.yml b/.goreleaser.yml new file mode 100644 index 0000000..7a2bdfd --- /dev/null +++ b/.goreleaser.yml @@ -0,0 +1,189 @@ +version: 2 + +before: + hooks: + - go mod tidy + - go generate ./... + +builds: + - id: mpcd + main: ./cmd/mpcd + binary: mpcd + env: + - CGO_ENABLED=0 + goos: + - linux + - darwin + goarch: + - amd64 + - arm64 + ldflags: + - -s -w + - -X main.version={{.Version}} + - -X main.commit={{.Commit}} + - -X main.date={{.Date}} + - -X main.builtBy=goreleaser + + - id: lux-mpc-cli + main: ./cmd/lux-mpc-cli + binary: lux-mpc-cli + env: + - CGO_ENABLED=0 + goos: + - linux + - darwin + goarch: + - amd64 + - arm64 + ldflags: + - -s -w + - -X main.version={{.Version}} + - -X main.commit={{.Commit}} + - -X main.date={{.Date}} + - -X main.builtBy=goreleaser + +archives: + - id: default + name_template: >- + {{ .ProjectName }}_ + {{- title .Os }}_ + {{- if eq .Arch "amd64" }}x86_64 + {{- else if eq .Arch "386" }}i386 + {{- else }}{{ .Arch }}{{ end }} + {{- if .Arm }}v{{ .Arm }}{{ end }} + files: + - README.md + - LICENSE + +checksum: + name_template: 'checksums.txt' + +snapshot: + name_template: "{{ incpatch .Version }}-next" + +changelog: + sort: asc + use: github + filters: + exclude: + - '^docs:' + - '^test:' + - '^chore:' + - '^ci:' + - Merge pull request + - Merge remote-tracking branch + - Merge branch + groups: + - title: 'New Features' + regexp: '^.*?feat(\([[:word:]]+\))??!?:.+$' + order: 0 + - title: 'Bug Fixes' + regexp: '^.*?fix(\([[:word:]]+\))??!?:.+$' + order: 1 + - title: 'Performance Improvements' + regexp: '^.*?perf(\([[:word:]]+\))??!?:.+$' + order: 2 + - title: 'Security Updates' + regexp: '^.*?sec(\([[:word:]]+\))??!?:.+$' + order: 3 + - title: 'Protocol Updates' + regexp: '^.*?protocol(\([[:word:]]+\))??!?:.+$' + order: 4 + - title: Others + order: 999 + +dockers: + - image_templates: + - "luxfi/mpc:{{ .Tag }}-amd64" + - "ghcr.io/luxfi/mpc:{{ .Tag }}-amd64" + dockerfile: 
Dockerfile + use: buildx + goarch: amd64 + build_flag_templates: + - "--platform=linux/amd64" + - "--label=org.opencontainers.image.created={{.Date}}" + - "--label=org.opencontainers.image.title={{.ProjectName}}" + - "--label=org.opencontainers.image.revision={{.FullCommit}}" + - "--label=org.opencontainers.image.version={{.Version}}" + - "--label=org.opencontainers.image.source={{.GitURL}}" + - "--label=org.opencontainers.image.description=Lux MPC - Threshold Signature Service" + - "--label=org.opencontainers.image.licenses=MIT" + - image_templates: + - "luxfi/mpc:{{ .Tag }}-arm64" + - "ghcr.io/luxfi/mpc:{{ .Tag }}-arm64" + dockerfile: Dockerfile + use: buildx + goarch: arm64 + build_flag_templates: + - "--platform=linux/arm64" + - "--label=org.opencontainers.image.created={{.Date}}" + - "--label=org.opencontainers.image.title={{.ProjectName}}" + - "--label=org.opencontainers.image.revision={{.FullCommit}}" + - "--label=org.opencontainers.image.version={{.Version}}" + - "--label=org.opencontainers.image.source={{.GitURL}}" + - "--label=org.opencontainers.image.description=Lux MPC - Threshold Signature Service" + - "--label=org.opencontainers.image.licenses=MIT" + +docker_manifests: + - name_template: "luxfi/mpc:{{ .Tag }}" + image_templates: + - "luxfi/mpc:{{ .Tag }}-amd64" + - "luxfi/mpc:{{ .Tag }}-arm64" + - name_template: "luxfi/mpc:latest" + image_templates: + - "luxfi/mpc:{{ .Tag }}-amd64" + - "luxfi/mpc:{{ .Tag }}-arm64" + - name_template: "ghcr.io/luxfi/mpc:{{ .Tag }}" + image_templates: + - "ghcr.io/luxfi/mpc:{{ .Tag }}-amd64" + - "ghcr.io/luxfi/mpc:{{ .Tag }}-arm64" + - name_template: "ghcr.io/luxfi/mpc:latest" + image_templates: + - "ghcr.io/luxfi/mpc:{{ .Tag }}-amd64" + - "ghcr.io/luxfi/mpc:{{ .Tag }}-arm64" + +nfpms: + - id: packages + package_name: mpcd + vendor: Lux Partners Limited + homepage: https://github.com/luxfi/mpc + maintainer: Lux Development Team + description: |- + Lux MPC - Multi-Party Computation Threshold Signature Service + Supporting 
both ECDSA (CMP/CGGMP21) and EdDSA (FROST) protocols + license: MIT + formats: + - deb + - rpm + - apk + dependencies: + - git + recommends: + - golang + bindir: /usr/bin + contents: [] + +release: + github: + owner: luxfi + name: mpc + draft: false + prerelease: auto + mode: replace + name_template: "{{.ProjectName}}-v{{.Version}}" + footer: | + --- + + ### Container Images + + - `docker pull luxfi/mpc:{{ .Tag }}` + - `docker pull ghcr.io/luxfi/mpc:{{ .Tag }}` + + ### Verifying Checksums + + ```bash + curl -L https://github.com/luxfi/mpc/releases/download/{{ .Tag }}/checksums.txt | sha256sum -c --ignore-missing + ``` + +announce: + skip: false \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index 85e88d0..0000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,78 +0,0 @@ -# CHANGELOG - -## v0.2.0 (2025-04-12) - -- Use onSuccess callback when sign session succeeds [View](https://github.com/fystack/mpcium/commit/9602d4d9bfe37c2d038856d3ed206bfecd2e8c93) -- Fix bug signing doesn't work after all nodes are backup [View](https://github.com/fystack/mpcium/commit/a9192ca11581dd986bdd21728cbda4b78d75a753) -- Handle duplicate message [View](https://github.com/fystack/mpcium/commit/e79f6e20fbe225e5aad8b0c9e70578356fce9573) -- Update timeout consumer keep subscribe on time [View](https://github.com/fystack/mpcium/commit/52ee83c3ecc2bbb8c16a8227f4f00b72a57c8499) -- Update signing timeout logic when not enough participants [View](https://github.com/fystack/mpcium/commit/e8ffa381f489a83e60dbcbf5262927e99eca2382) -- Persit message, handle failure and timeout sign tx [View](https://github.com/fystack/mpcium/commit/400f26912ea6b31cbf511de93c1270776055c758) - -## v0.1.7 (2024-10-05) - -- Fix bug sign mutiple transactions simulteneously crash if transactions have the same walletID [View](https://github.com/fystack/mpcium/commit/7163097387ca2c682f49bab3e4bd8ad58b33ff29) -- Revert "Debug" 
[View](https://github.com/fystack/mpcium/commit/f75077717d7cce0b9cfacb349470269ff1ec427b) -- Fix don't pass walletId to session [View](https://github.com/fystack/mpcium/commit/1d505fc8b5724daaf9bbdfa26380ee371ddd95ca) -- Debug [View](https://github.com/fystack/mpcium/commit/4810e7f553939821cce33859d871228eeebe0a2b) -- Fix topic composer logic [View](https://github.com/fystack/mpcium/commit/203cc7707486c2089a6af61f727fb60962b84b88) -- Update mask logic [View](https://github.com/fystack/mpcium/commit/6a3869489ef4881617839fdc1e062582293fc64e) -- Update topic composer [View](https://github.com/fystack/mpcium/commit/d06f23f16d9cd47823d1b5a5cae3428ec7bb5617) -- Minor fix on make file [View](https://github.com/fystack/mpcium/commit/f376c4f8d170ff49c8eb07b4df8d76a38f2134ae) -- Fix bug, load nats url from env var [View](https://github.com/fystack/mpcium/commit/ccfb81159a07f497dc2d8340a3b0b763719e5bcb) -- Update changelog v0.1.6 [View](https://github.com/fystack/mpcium/commit/87220e795b44ecc14759e71c5224c62b3bec08d3) - -## v0.1.6 (2024-09-22) - -- Add deployment script mpcium [View](https://github.com/fystack/mpcium/commit/5b97ce9b1208eafdf104f4045d6614889d470b93) -- Log config in generateid script [View](https://github.com/fystack/mpcium/commit/8f244d313655e5eb8a73b55cc5b3488e36d86488) -- Update configuration load logic, mask sensitive data [View](https://github.com/fystack/mpcium/commit/974b7c404a369fa6211386780881cb407ee8eee2) -- Update makefile [View](https://github.com/fystack/mpcium/commit/affc8300bf4c4cb18af0cc6a347fc8f1d8e9565a) -- Fix load config, it doens't load environment variables into struct [View](https://github.com/fystack/mpcium/commit/744e8ce48dde25fce460eaad4d73ebf2f38e2be3) -- Implement connection for prod [View](https://github.com/fystack/mpcium/commit/5e10a81f12c0879fe8ffd1ee918068ebc50114d3) -- Update changelog [View](https://github.com/fystack/mpcium/commit/d0572d20839b2b512da04f63ffc2d1fc28610cbf) - -## v0.1.5 (2024-06-22) - -- Add a slight delay 
before start sending key generation messages [View](https://github.com/fystack/mpcium/commit/c8229c0a32510eb3faeb7dc2025b4832cb65c715) -- Add type to sessions [View](https://github.com/fystack/mpcium/commit/c7c70e36c39125e6899f5e315493a0a84e47f2dd) -- Add comment to add distributed lock [View](https://github.com/fystack/mpcium/commit/66d106838f335eae3852d4434f87d2cdf9efe6dd) -- Add delay temporary solution to fix the issue that sign before all nodes are ready [View](https://github.com/fystack/mpcium/commit/f30c1c5c543e360f5691fe3e434a75fcefe83056) - -## v0.1.4 (2024-05-19) - -- Add script to add prefix ecdsa for existing keyinfos [View](https://github.com/fystack/mpcium/commit/9495ce20aea153ba00abde65a6628bf1f2602144) -- Return only Signature in siging success event [View](https://github.com/fystack/mpcium/commit/c915a5f7f925b85af67f0a5dd6c5ba29f3eee818) -- Support eddsa signing [View](https://github.com/fystack/mpcium/commit/efc5125ed60ca774c382a5bcb01bb6de6fa548f0) -- Add eddsa signing session [View](https://github.com/fystack/mpcium/commit/fc9f2c20bb41c8542edc3bbecaf261c44a52122d) -- Init logger for migration script [View](https://github.com/fystack/mpcium/commit/9159523bade220ca83fd6db81b17ccf600c9229c) - -## v0.1.3 (2024-05-11) - -- Add script to add prefix ecdsa for old keys [View](https://github.com/fystack/mpcium/commit/934c37c9dc6b68c2ab5a7a7afe71758aae9f44ed) -- Include eddsa pubkey in SucessGenerationEvent [View](https://github.com/fystack/mpcium/commit/92a102ed2d037e2b6929d70d082795454db42f14) -- Gen eddsa key works now [View](https://github.com/fystack/mpcium/commit/44866f36d37bfa16bcfc610e057fd7a3037e9efb) -- Support EDDSA [View](https://github.com/fystack/mpcium/commit/722e636ad00edd57243b62e877a01f629aa27b84) -- Update changelog [View](https://github.com/fystack/mpcium/commit/e9ed0f67279b6476c8d1a8638b0baad59ebd018c) - -## v0.1.2 (2024-03-09) - -- Change default threshhold to 1 
[View](https://github.com/fystack/mpcium/commit/f360810aa760b52871a7cba0b107d09e6bbd7d47) -- Refine log [View](https://github.com/fystack/mpcium/commit/a9cde4014c16cfe3ca6de73039b8e13c49bb65d0) -- Threshold validation, allow t+1 peers to create signing session [View](https://github.com/fystack/mpcium/commit/b5c15463fa01f58ce9557ebffaa0e96ce6dcda2d) -- Implement keyinfo store [View](https://github.com/fystack/mpcium/commit/d39168ddd7fc622eb1a58ea90617680249297515) -- Track ready nodes, reduce readiness period to 1 second [View](https://github.com/fystack/mpcium/commit/75cb2b286fffb89f1df39c680dee3d1cacfcffc9) -- Upgrade tss-lib to v2 [View](https://github.com/fystack/mpcium/commit/cd324358d7c297d2025ea2d0c02464b5552f513d) - -## v0.1.1 (2024-02-05) - -- Add retry package to improve NATs direct message resiliency [View](https://github.com/fystack/mpcium/commit/195f9a4c50732919994b67c13396f141fa4efcdf) -- Move listening to incoming message prior to genkey and signing to fix the nats: no responders available for request [View](https://github.com/fystack/mpcium/commit/421d02e947d12324c188d9bb2868cfb9ee02c3ca) -- Increase direct messsage timeout, add mutex lock for party updating [View](https://github.com/fystack/mpcium/commit/1d75eeea669212ff4b3168575cc07d4e8a0280ae) -- Pass done function to session to clean up resource after successful execution [View](https://github.com/fystack/mpcium/commit/e5430315a3ddc4a0b74b456ee9f4f68b185b5c5e) -- Implement queue manager to spawn message queue based on topic [View](https://github.com/fystack/mpcium/commit/7c107c9cba7db68358df77ea00b00d8d6b659d1d) -- Decode round8, round9 [View](https://github.com/fystack/mpcium/commit/ccd869e8d827ecfeae453b0f865899d35b520e0f) -- Refactor signing session [View](https://github.com/fystack/mpcium/commit/4447cc46da86c03a2353edc7f01b85fd79ead084) -- Clean up code [View](https://github.com/fystack/mpcium/commit/b40d8a42082ed00099054b9852a3415ca24426d0) -- Refactor keygen session 
[View](https://github.com/fystack/mpcium/commit/11bc34bb5831b0b3ad39fe14e77ef56a931d023d) -- Fix script to load peer ids to consul [View](https://github.com/fystack/mpcium/commit/70a1b53c350ce6414cca308aa588a53495c9411f) diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..11a1ed6 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,36 @@ +# Stage 1: Build embedded admin UI +FROM node:22-alpine AS ui +RUN corepack enable && corepack prepare pnpm@latest --activate +WORKDIR /ui +COPY ui/package.json ui/pnpm-lock.yaml* ./ +RUN pnpm install --frozen-lockfile 2>/dev/null || pnpm install +COPY ui/ . +RUN pnpm build + +# Lux MPC — single image ships both daemon (mpcd) + CLI (mpc). +# Default entrypoint: mpcd. Override ENTRYPOINT / CMD with `mpc ...` for CLI. +# syntax=docker/dockerfile:1 + +FROM --platform=$BUILDPLATFORM golang:1.26-alpine AS builder +RUN apk add --no-cache git ca-certificates +ARG GITHUB_TOKEN +RUN git config --global url."https://${GITHUB_TOKEN}@github.com/".insteadOf "https://github.com/" +ENV GOPRIVATE=github.com/luxfi/*,github.com/hanzoai/* +ENV GONOSUMDB=github.com/luxfi/*,github.com/hanzoai/* + +WORKDIR /app +COPY go.mod go.sum ./ +RUN go mod download +COPY . .
+COPY --from=ui /ui/dist ./ui/dist/ + +ENV GOEXPERIMENT=runtimesecret +RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o mpcd ./cmd/mpcd +RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o mpc ./cmd/mpc + +FROM alpine:3.21 +RUN apk add --no-cache ca-certificates tzdata +COPY --from=builder /app/mpcd /usr/local/bin/mpcd +COPY --from=builder /app/mpc /usr/local/bin/mpc +EXPOSE 8081 9651 9800 +ENTRYPOINT ["mpcd"] diff --git a/INSTALLATION.md b/INSTALLATION.md deleted file mode 100644 index d523a20..0000000 --- a/INSTALLATION.md +++ /dev/null @@ -1,296 +0,0 @@ -# Mpcium Installation Guide - -## Prerequisites - -Before starting, ensure you have: - -- **Go** 1.23+ installed: [Install Go here](https://go.dev/doc/install) -- **NATS** server running -- **Consul** server running - ---- - -## Clone and Install Mpcium - -### Clone the Repository - -```bash -git clone https://github.com/fystack/mpcium.git -cd mpcium -``` - -### Build the Project - -With Make: - -```bash -make -``` - -Or with Go: - -```bash -go install ./cmd/mpcium -go install ./cmd/mpcium-cli -``` - -### Available Commands - -- `mpcium`: Start an MPCium node -- `mpcium-cli`: CLI utility for peer, identity, and initiator configuration - ---- - -## Running NATS and Consul (Development Only) - -> โš ๏ธ This setup is insecure and should only be used for development. For production, use a secure cluster environment with TLS certificates. 
- -### Docker Compose Configuration - -Create a `docker-compose.yaml` file: - -```yaml -version: "3" - -services: - nats-server: - image: nats:latest - container_name: nats-server - command: -js --http_port 8222 - ports: - - "4222:4222" - - "8222:8222" - - "6222:6222" - tty: true - restart: always - - consul: - image: consul:1.15.4 - container_name: consul - ports: - - "8500:8500" - - "8601:8600/udp" - command: "agent -server -ui -node=server-1 -bootstrap-expect=1 -client=0.0.0.0" - restart: always -``` - -### Start the Services - -```bash -docker compose up -d -``` - ---- - -## Generate Peer Configuration - -```bash -mpcium-cli generate-peers -n 3 -``` - -Example output: - -```json -{ - "node0": "12345678-1234-1234-1234-123456789abc", - "node1": "23456789-2345-2345-2345-23456789abcd", - "node2": "34567890-3456-3456-3456-3456789abcde" -} -``` - ---- - -## Cluster Configuration - -### 1. Create and Update `config.yaml` - -```bash -cp config.yaml.template config.yaml -``` - -Edit `config.yaml`: - -```yaml -nats: - url: nats://127.0.0.1:4222 -consul: - address: localhost:8500 - -mpc_threshold: 2 -environment: development -badger_password: "your_badger_password" -event_initiator_pubkey: "your_event_initiator_pubkey" -``` - -### Generate a Strong Password (Recommended) - -```bash -< /dev/urandom tr -dc 'A-Za-z0-9!@#$^&*()-_=+[]{}|;:,.<>?/~' | head -c 16; echo -``` - -Example: - -```yaml -badger_password: "F))ysJp?E]ol&I;^" -``` - -### 2. Register Peers to Consul - -```bash -mpcium-cli register-peers -``` - ---- - -## Event Initiator Setup - -### Generate the Initiator - -```bash -mpcium-cli generate-initiator -``` - -> ๐Ÿ’ก Use `--encrypt` in production. 
- -### Add Public Key to `config.yaml` - -From `event_initiator.identity.json`: - -```json -{ - "public_key": "09be5d070816aadaa1b6638cad33e819a8aed7101626f6bf1e0b427412c3408a" -} -``` - -Update `config.yaml`: - -```yaml -event_initiator_pubkey: "09be5d070816aadaa1b6638cad33e819a8aed7101626f6bf1e0b427412c3408a" -``` - ---- - -## Configure Node Identities - -### 1. Create Node Folders - -```bash -mkdir node{0..2} -for dir in node{0..2}; do - cp config.yaml peers.json "$dir/" - mkdir -p "$dir/identity" -done -``` - -### 2. Generate Identity for Each Node - -Example for `node0`: - -```bash -cd node0 -mpcium-cli generate-identity --node node0 -``` - -> ๐Ÿ’ก For production, use encryption: -> -> ```bash -> mpcium-cli generate-identity --node node0 --encrypt -> ``` - -### Generate Strong Password for Encryption - -```bash -< /dev/urandom tr -dc 'A-Za-z0-9!@#$^&*()-_=+[]{}|;:,.<>?/~' | head -c 16; echo -``` - -### 3. Distribute Identity Files to All Nodes - -```bash -cp identity/node0_identity.json ../node1/identity/node0_identity.json -cp identity/node0_identity.json ../node2/identity/node0_identity.json -``` - -Repeat this for `node1` and `node2`. 
- -### Folder Structure Example - -``` -โ”œโ”€โ”€ node0 -โ”‚ย ย  โ”œโ”€โ”€ config.yaml -โ”‚ย ย  โ”œโ”€โ”€ identity -โ”‚ย ย  โ”‚ย ย  โ”œโ”€โ”€ node0_identity.json -โ”‚ย ย  โ”‚ย ย  โ”œโ”€โ”€ node0_private.key -โ”‚ย ย  โ”‚ย ย  โ”œโ”€โ”€ node1_identity.json -โ”‚ย ย  โ”‚ย ย  โ””โ”€โ”€ node2_identity.json -โ”‚ย ย  โ””โ”€โ”€ peers.json -โ”œโ”€โ”€ node1 -โ”‚ย ย  โ”œโ”€โ”€ config.yaml -โ”‚ย ย  โ”œโ”€โ”€ identity -โ”‚ย ย  โ”‚ย ย  โ”œโ”€โ”€ node0_identity.json -โ”‚ย ย  โ”‚ย ย  โ”œโ”€โ”€ node1_identity.json -โ”‚ย ย  โ”‚ย ย  โ”œโ”€โ”€ node1_private.key -โ”‚ย ย  โ”‚ย ย  โ””โ”€โ”€ node2_identity.json -โ”‚ย ย  โ””โ”€โ”€ peers.json -โ”œโ”€โ”€ node2 -โ”‚ย ย  โ”œโ”€โ”€ config.yaml -โ”‚ย ย  โ”œโ”€โ”€ identity -โ”‚ย ย  โ”‚ย ย  โ”œโ”€โ”€ node0_identity.json -โ”‚ย ย  โ”‚ย ย  โ”œโ”€โ”€ node1_identity.json -โ”‚ย ย  โ”‚ย ย  โ”œโ”€โ”€ node2_identity.json -โ”‚ย ย  โ”‚ย ย  โ””โ”€โ”€ node2_private.key -โ”‚ย ย  โ””โ”€โ”€ peers.json -``` - ---- - -## Start Mpcium Nodes - -Start each node: - -```bash -cd node0 -mpcium start -n node0 -``` - -```bash -cd node1 -mpcium start -n node1 -``` - -```bash -cd node2 -mpcium start -n node2 -``` - -> ๐Ÿ’ก In production, avoid hardcoded passwords: -> -> ```bash -> mpcium start -n node0 --prompt-credentials -> ``` - ---- - -![All node ready](images/all-node-ready.png) - ---- - -## Production Deployment (High Security) - -1. Use production-grade **NATS** and **Consul** clusters. -2. Enable **TLS certificates** on all endpoints. -3. Encrypt all keys: - ```bash - mpcium-cli generate-initiator --encrypt - mpcium-cli generate-identity --node node0 --encrypt - ``` -4. Use `--prompt-credentials` to securely input Badger passwords (avoid hardcoding in `config.yaml`). 
- ---- - -## Apendix - -### Decrypt initiator private key with age - -``` -age --decrypt -o event_initiator.key event_initiator.key.age -``` diff --git a/LICENSE b/LICENSE index d645695..5b2cbe3 100644 --- a/LICENSE +++ b/LICENSE @@ -1,202 +1,122 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Lux Ecosystem License +Version 1.2, December 2025 + +Copyright (c) 2020-2025 Lux Industries Inc. +All rights reserved. + +TECHNOLOGY PORTFOLIO - PATENT APPLICATIONS PLANNED +Contact: licensing@lux.network + +================================================================================ + TERMS AND CONDITIONS +================================================================================ + +1. DEFINITIONS + + "Lux Primary Network" means the official Lux blockchain with Network ID=1 + and EVM Chain ID=96369. + + "Authorized Network" means the Lux Primary Network, official testnets/devnets, + and any L1/L2/L3 chain descending from the Lux Primary Network. + + "Descending Chain" means an L1/L2/L3 chain built on, anchored to, or deriving + security from the Lux Primary Network or its authorized testnets. 
+ + "Research Use" means non-commercial academic research, education, personal + study, or evaluation purposes. + + "Commercial Use" means any use in connection with a product or service + offered for sale or fee, internal use by a for-profit entity, or any use + to generate revenue. + +2. GRANT OF LICENSE + + Subject to these terms, Lux Industries Inc grants you a non-exclusive, + royalty-free license to: + + (a) Use for Research Use without restriction; + + (b) Operate on the Lux Primary Network (Network ID=1, EVM Chain ID=96369); + + (c) Operate on official Lux testnets and devnets; + + (d) Operate L1/L2/L3 chains descending from the Lux Primary Network; + + (e) Build applications within the Lux ecosystem; + + (f) Contribute improvements back to the original repositories. + +3. RESTRICTIONS + + Without a commercial license from Lux Industries Inc, you may NOT: + + (a) Fork the Lux Network or any Lux software; + + (b) Create competing networks not descending from Lux Primary Network; + + (c) Use for Commercial Use outside the Lux ecosystem; + + (d) Sublicense or transfer rights outside the Lux ecosystem; + + (e) Use to create competing blockchain networks, exchanges, custody + services, or cryptographic systems outside the Lux ecosystem. + +4. NO FORKS POLICY + + Lux Industries Inc maintains ZERO TOLERANCE for unauthorized forks. + Any fork or deployment on an unauthorized network constitutes: + + (a) Breach of this license; + (b) Grounds for immediate legal action. + +5. RIGHTS RESERVATION + + All rights not explicitly granted are reserved by Lux Industries Inc. + + We plan to apply for patent protection for the technology in this + repository. Any implementation outside the Lux ecosystem may require + a separate commercial license. + +6. 
DISCLAIMER OF WARRANTY + + THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + +7. LIMITATION OF LIABILITY + + IN NO EVENT SHALL LUX INDUSTRIES INC BE LIABLE FOR ANY CLAIM, DAMAGES + OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE. + +8. TERMINATION + + This license terminates immediately upon any breach, including but not + limited to deployment on unauthorized networks or creation of forks. + +9. GOVERNING LAW + + This License shall be governed by the laws of the State of Delaware. + +10. COMMERCIAL LICENSING + + For commercial use outside the Lux ecosystem: + + Lux Industries Inc. + Email: licensing@lux.network + Subject: Commercial License Request + +================================================================================ + TL;DR +================================================================================ + +- Research/academic use = OK +- Lux Primary Network (Network ID=1, Chain ID=96369) = OK +- L1/L2/L3 chains descending from Lux Primary Network = OK +- Commercial products outside Lux ecosystem = Contact licensing@lux.network +- Forks = Absolutely not + +================================================================================ + +See LP-0012 for full licensing documentation: +https://github.com/luxfi/lps/blob/main/LPs/lp-0012-ecosystem-licensing.md diff --git a/LLM.md b/LLM.md new file mode 100644 index 0000000..3aa9157 --- /dev/null +++ b/LLM.md @@ -0,0 +1,478 @@ +# LLM.md - Hanzo MPC Signer Architecture & Development Guide + +This document provides comprehensive guidance for AI assistants working with the Hanzo MPC (Multi-Party Computation) codebase. 
+ +## ๐Ÿญ Production Deployment State (2026-03-02) + +### Namespaces +- `lux-mpc` โ€” PRIMARY production consensus-mode MPC (3 nodes, dashboard API, postgres, valkey) +- `lux-bridge` โ€” Bridge-specific MPC (3 consensus-mode nodes, dashboard API using bridge postgres) +- `hanzo` โ€” Legacy NATS/Consul MPC nodes (5 nodes, older deployment) + +### Key Decisions +- **Storage**: ZapDB (`github.com/luxfi/database/zapdb`) โ€” our fork, NOT badger directly +- **Encryption**: `encdb.New(password, rawDB)` wraps ZapDB with ChaCha20-Poly1305; backups contain pre-encrypted values, restore into raw zapdb +- **Dashboard API**: Port 8081, enabled by `MPC_API_DB` env var; uses ORM's `_entities` JSONB table +- **Multi-tenancy**: One postgres instance, one `_entities` table, `kind` + `orgId` in JSONB data +- **Binary distribution**: S3 bucket `lux-mpc-backups/binaries/` (public read); startup script promotes `/data/mpcd.new` +- **S3 address**: Use ClusterIP `10.124.44.247:9000` from lux-bridge pods (cross-namespace DNS fails); internal: `s3.hanzo.svc.cluster.local:9000` works only from hanzo namespace + +### Infrastructure Principles (keep it light) +- 1 postgres per cluster (MPC gets `mpc_api` db, bridge gets `bridge` db) +- 1 valkey/redis per cluster for KV cache +- 1 S3 bucket with org-prefixed paths for all backups +- ZapDB passwords via HSM PasswordProvider (`--hsm-provider=aws|gcp|azure|env|file`, default `env` for backward compat). Both legacy and consensus modes use `resolveZapDBPassword()` in `cmd/mpcd/main.go`. 
+- Customer-controlled encryption: each org's ZapDB encrypted with org's KMS key โ€” FUTURE WORK + +### ORM Filter Behavior +- `Filter("keyHash=", value)` โ†’ SQL: `WHERE data->>'keyHash' = $1` (camelCase from Go field name) +- `Filter("key_hash=", value)` โ†’ SQL: `WHERE data->>'key_hash' = $1` (no conversion) +- ALWAYS use camelCase matching the struct's `json:""` tag when filtering + +## ๐Ÿ“š Overview + +Hanzo MPC is a threshold signing service that provides: +- **ECDSA (secp256k1)** for Bitcoin/Ethereum/EVM chains +- **EdDSA (Ed25519)** for Solana/Polkadot/Sui +- **Threshold signatures** (t-of-n) with CGGMP21 protocol +- **Key resharing** for rotation without changing addresses + +### Architecture Position + +Hanzo MPC is designed as a **pluggable signer backend** for Hanzo KMS: + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ Hanzo KMS (Control Plane) โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ Policy โ”‚ Approvals โ”‚ Audit Log โ”‚ Key Registry โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ Unified Signing API โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ” 
โ”Œโ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ HSM โ”‚ โ”‚ MPC โ”‚ โ”‚ Software โ”‚ โ”‚ Remote โ”‚ โ”‚ +โ”‚ โ”‚ Signer โ”‚ โ”‚ Signer โ”‚ โ”‚ Signer โ”‚ โ”‚ Signer โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +### Product Architecture + +1. **Hanzo KMS Platform** (Control Plane) + - Key registry + metadata + - Policy + workflow (quorum, time locks, spend limits, allowlists) + - Audit log + - Unified API + - Secrets manager + +2. **Hanzo MPC Signer** (This Project - Data Plane) + - DKG / key share management + - Threshold signing sessions + - Reshare/rotate shares + - Optional hardware-rooted modes + +3. **Hanzo HSM** (Alternative Signer) + - HSM-backed keys for classic KMS workloads + - HSM-sealed share storage for MPC nodes + +4. 
**Hanzo Treasury** (Optional UI) + - Transaction building + chain adapters + - Simulation / policy previews + - Approvals UI (backed by KMS workflow engine) + +## ๐Ÿš€ Quick Start + +### Build and Install +```bash +# Build binaries +make build + +# Or install directly (for consensus-embedded mode) +go install ./cmd/mpcd + +# Or for legacy NATS/Consul mode +go install ./cmd/lux-mpc-cli +``` + +### Consensus Mode (NEW - Recommended) +```bash +# Start MPC node in consensus mode (no external dependencies) +mpcd start --mode consensus \ + --node-id node0 \ + --listen :9651 \ + --api :9800 \ + --data /data/mpc/node0 \ + --threshold 2 \ + --peer node1@127.0.0.1:9652 \ + --peer node2@127.0.0.1:9653 + +# Or via lux CLI +lux mpc init --threshold 2 --nodes 3 +lux mpc start +``` + +### Legacy Mode (NATS + Consul) +```bash +# Generate peers configuration +lux-mpc-cli generate-peers -n 3 + +# Register peers to Consul +lux-mpc-cli register-peers + +# Generate event initiator +lux-mpc-cli generate-initiator + +# Generate node identity +lux-mpc-cli generate-identity --node node0 + +# Start MPC node in legacy mode +mpcd start --mode legacy -n node0 +``` + +## ๐Ÿ“ Project Structure + +``` +/Users/z/work/lux/mpc/ +โ”œโ”€โ”€ cmd/ # Command-line applications +โ”‚ โ”œโ”€โ”€ mpcd/ # Main MPC daemon (consensus-embedded) +โ”‚ โ””โ”€โ”€ lux-mpc-cli/ # CLI tools for configuration +โ”œโ”€โ”€ pkg/ # Core packages +โ”‚ โ”œโ”€โ”€ client/ # Go client library +โ”‚ โ”œโ”€โ”€ mpc/ # MPC implementation (TSS) +โ”‚ โ”œโ”€โ”€ kvstore/ # BadgerDB storage +โ”‚ โ”œโ”€โ”€ transport/ # Consensus-embedded transport (ZAP + PoA) +โ”‚ โ”œโ”€โ”€ messaging/ # NATS messaging (DEPRECATED - use transport) +โ”‚ โ”œโ”€โ”€ infra/ # Consul integration (DEPRECATED - use transport) +โ”‚ โ”œโ”€โ”€ identity/ # Ed25519 identity management +โ”‚ โ””โ”€โ”€ eventconsumer/ # Event processing +โ”œโ”€โ”€ e2e/ # End-to-end tests +โ”œโ”€โ”€ examples/ # Usage examples +โ””โ”€โ”€ scripts/ # Utility scripts +``` + +## ๐Ÿ—๏ธ Core 
Components + +### 1. MPC Engine +Based on threshold cryptography: +- **CGGMP21** protocol for ECDSA (secp256k1) - **IMPLEMENTED & TESTED** +- **FROST** protocol for EdDSA (Ed25519) - **IMPLEMENTED & TESTED** (keygen generates both ECDSA and EdDSA keys) +- Configurable threshold (t-of-n) +- Default: t = โŒŠn/2โŒ‹ + 1 (majority) + +### 2. Storage Layer: BadgerDB +- AES-256 encrypted key shares +- Session data persistence +- Automatic backups + +### 3. Transport Layer (NEW - Jan 2026) + +The MPC daemon now supports **consensus-embedded transport** that eliminates external dependencies: + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ MPC Node (Consensus-Embedded) โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ PubSub โ”‚ MessageQ โ”‚ Registry โ”‚ KeyInfoStore โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ ZAP Transport (Wire Protocol) โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ Membership (Ed25519 PoA Validators) โ”‚ โ”‚ +โ”‚ 
โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ StateStore (BadgerDB + Replication) โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +**What was replaced:** +- **NATS** โ†’ ZAP Transport with `Broadcast()`/`Query()` +- **Consul** โ†’ Consensus `Membership` with Ed25519 keys as PoA validators +- **PostgreSQL** โ†’ `StateStore` + BadgerDB for replicated state +- **Redis** โ†’ Consensus state queries via `Transport.Query()` + +**What remains:** +- **BadgerDB** โ†’ Local encrypted key share storage (unchanged) +- **Ed25519 identities** โ†’ Now serve as PoA validator keys + +**Usage (pkg/transport):** +```go +factory, err := transport.NewFactory(transport.FactoryConfig{ + NodeID: "node0", + ListenAddr: ":9651", + Peers: map[string]string{"node0": ":9651", "node1": ":9652", "node2": ":9653"}, + PrivateKey: privateKey, + PublicKey: publicKey, + BadgerPath: "/data/mpc/node0", + BadgerPassword: "secure-password", +}) + +ctx := context.Background() +factory.Start(ctx) + +// Use these instead of NATS/Consul: +pubSub := factory.PubSub() // replaces messaging.PubSub +registry := factory.Registry() // replaces mpc.PeerRegistry +kvstore := factory.KVStore() // local BadgerDB +keyinfo := factory.KeyInfoStore() // replaces Consul-based keyinfo.Store +``` + +### 3b. 
Messaging: NATS JetStream (DEPRECATED) +- Pub/sub for broadcasts +- Direct messaging for P2P +- Message persistence +- **โš ๏ธ Use `pkg/transport` instead for new deployments** + +### 4. Service Discovery: Consul (DEPRECATED) +- Node registration +- Health checking +- Configuration management +- **โš ๏ธ Use `pkg/transport` Membership instead for new deployments** + +### 5. Identity: Ed25519 keypairs +- Node authentication +- Message signing/verification +- Encrypted with Age + +## ๐Ÿ”ง Configuration + +### Consensus-Embedded Mode (NEW - Jan 2026) + +```yaml +# config.yaml +environment: development +transport: + listen_addr: ":9651" + peers: + node0: "10.0.0.1:9651" + node1: "10.0.0.2:9651" + node2: "10.0.0.3:9651" +badger: + path: "/data/mpc" + password: "secure-password" + backup_dir: "/data/mpc/backups" +identity: + key_file: "node0_identity.json" +event_initiator_pubkey: "hex-encoded-pubkey" +``` + +### Legacy Mode (NATS + Consul) + +```yaml +# config.yaml +environment: development +consul: + address: localhost:8500 +nats: + url: nats://localhost:4222 +badger_password: "secure-password" +event_initiator_pubkey: "hex-encoded-pubkey" +``` + +### Environment Variables +- `LUX_MPC_CONFIG` - Path to config.yaml +- `LUX_MPC_BACKUP` - Backup file identifier +- `LUX_MPC_MODE` - "consensus" (new) or "legacy" (NATS/Consul) + +## ๐Ÿ” Security Model + +- **Threshold Security**: No single node has the complete key +- **Message Authentication**: All messages signed with Ed25519 +- **Storage Encryption**: BadgerDB encrypted with user password +- **Network Security**: TLS + mutual authentication +- **Key Rotation**: Supports resharing without changing addresses + +## ๐Ÿ“Š Performance + +- **Key Generation**: ~30s for 3 nodes +- **Signing**: <1s for threshold signatures +- **Storage**: ~100MB per node (with backups) +- **Network**: Low bandwidth, resilient to failures + +## ๐Ÿ”— Integration with Hanzo Commerce + +The MPC Signer integrates with Commerce for crypto payments: + 
+
+```go
+// Commerce uses MPC via the processor interface
+type MPCProcessor struct {
+    kmsClient *kms.Client // Hanzo KMS for policy/approval
+    mpcClient *mpc.Client // Hanzo MPC for signing
+}
+
+func (p *MPCProcessor) Charge(ctx context.Context, req PaymentRequest) (*PaymentResult, error) {
+    // 1. KMS validates policy and approvals
+    // 2. MPC signs the transaction
+    // 3. Transaction broadcast to blockchain
+}
+```
+
+## 🔧 Development Workflow
+
+### Testing
+```bash
+# Run unit tests
+make test
+
+# Run with coverage
+make test-coverage
+
+# Run E2E tests
+make e2e-test
+```
+
+### Common Tasks
+
+1. **Generate 3-node test cluster**:
+   ```bash
+   ./setup_identities.sh
+   ```
+
+2. **Recover from backup**:
+   ```bash
+   hanzo-mpc-cli recover --backup-dir ./backups --recovery-path ./recovered-db
+   ```
+
+3. **Production deployment**:
+   - Use `--encrypt` flag for identity generation
+   - Enable TLS on all services
+   - Use `--prompt-credentials` to avoid hardcoded passwords
+
+## 🐛 Common Issues
+
+1. **Port conflicts**: Default ports are 4222 (NATS), 8500 (Consul)
+2. **Database locks**: Ensure single process per node
+3. **Network delays**: Check NATS/Consul connectivity
+4. **Backup failures**: Verify disk space and permissions
+
+### CGGMP21 Protocol Issues (Debugged Jan 2026)
+
+5. **Protocol message serialization**: Protocol messages MUST use `MarshalBinary/UnmarshalBinary` to preserve all fields (SSID, RoundNumber, etc.). Raw JSON marshaling loses critical protocol state.
+
+6. **Party ID ordering**: Party IDs must be sorted consistently across all nodes. The `GetReadyPeersIncludeSelf()` function in `registry.go` sorts peer IDs to ensure deterministic ordering.
+
+7. **NATS topic naming**: Result topics must match JetStream stream configuration:
+   - Keygen results: `mpc.mpc_keygen_result.<wallet_id>` (note the `mpc.mpc_` prefix)
+   - Signing results: `mpc.mpc_signing_result.<wallet_id>`
+   - Stream expects pattern: `mpc.mpc_*_result.*`
+
+8. 
**Self-message rejection**: It's NORMAL for nodes to log "Handler cannot accept message" warnings when they receive their own broadcast messages back. This is expected behavior in pub/sub systems. + +9. **Binary rebuild for e2e tests**: E2E tests use `hanzo-mpc` from PATH. After code changes, run `go install ./cmd/hanzo-mpc && go install ./cmd/hanzo-mpc-cli` to update the installed binaries. + +10. **Session result publishing pattern**: Individual protocol sessions (CGGMP21, FROST) should NOT publish success events directly to the result queue. The handler (`keygen_handler_cggmp21.go`) is responsible for publishing the combined result with both ECDSA and EdDSA keys. Sessions should only: + - Publish FAILURE events to the queue (for immediate error notification) + - Send success pubkey via `externalFinishChan` so `WaitForFinish()` returns + - Always send to `externalFinishChan` (even empty string for errors) to prevent blocking + +11. **Dual keygen architecture**: The `handleKeyGenEventCGGMP21` function runs both ECDSA (CGGMP21) and EdDSA (FROST) keygen protocols in parallel via goroutines with WaitGroup. Both sessions must complete before the handler publishes the combined result containing both public keys. + +### FROST Signing Issues (Debugged Jan 2026) + +12. **FROST config serialization (CRITICAL)**: `frost.TaprootConfig` contains crypto types (`*curve.Secp256k1Scalar`, `*curve.Secp256k1Point`) that **do NOT have JSON marshalers**. Using `json.Marshal()` corrupts the key shares. **MUST use CBOR serialization** via `MarshalFROSTConfig()` and `UnmarshalFROSTConfig()` in `frost_config_marshal.go`. + +13. **FROST signing result type**: The FROST Taproot signing protocol returns `taproot.Signature` (which is `[]byte` of 64 bytes), NOT `*frost.Signature`. The `signing_session_frost.go` handles this correctly with: `s.signature = result.(taproot.Signature)`. + +14. 
**BIP-340/Taproot signature format**: FROST signing produces BIP-340 compatible signatures (64 bytes: R_x || s). The `taproot.Signature` type is already in this format, so no additional conversion is needed in `publishResult()`.
+
+### LSS Protocol Issues (Fixed Jan 2026)
+
+15. **LSS config serialization (CRITICAL - FIXED)**: Similar to FROST, `lssConfig.Config` contains crypto types (`curve.Scalar`, `curve.Point`) that **do NOT have JSON marshalers**. Fixed by implementing `MarshalLSSConfig()` and `UnmarshalLSSConfig()` in `lss_config_marshal.go` using CBOR serialization.
+
+16. **LSS capabilities vs CGGMP21**: LSS supports dynamic resharing (change T-of-N without reconstructing keys), threshold changes, and adding/removing participants. CGGMP21 only supports refresh (same committee). Both produce valid ECDSA signatures.
+
+### Security Audit Findings (Jan 2026)
+
+17. **Message authentication**: Protocol messages between nodes are not signed. Ed25519 signing code exists but is disabled. Consider re-enabling for production deployments.
+
+18. **Deduplication map cleanup**: The `processing` map used for deduplication grows unbounded. Recommend adding TTL-based cleanup for long-running sessions.
+
+19. **Protocol timeouts**: No timeout enforcement on protocol handlers. Recommend adding context with timeout to prevent indefinite hangs from stalling parties.
+
+### Security Hardening (Apr 2026)
+
+23. **Internal API auth (port 9800)**: All mutating endpoints (/keygen, /keys, /backup) now require `Authorization: Bearer <token>`. Token sourced from `MPC_INTERNAL_API_KEY` env var (KMS-synced). Falls back to deterministic derivation from Ed25519 node identity for dev. /health remains unauthenticated for K8s probes.
+
+24. **TLS-only transport**: `DualModeListener` replaced by `TLSOnlyListener`. Plaintext connections are rejected. TLS 1.3 with PQ key exchange (X25519MLKEM768) is mandatory for all peer-to-peer communication.
+
+25. 
**Sign rate limiting**: Bridge signing and intent signing endpoints have a dedicated 20 RPM per-IP rate limiter, separate from the global 100 RPM limit. + +26. **Internal API hardening**: Port 9800 now has body size limits (1 MB), read/write timeouts, and 10 RPM rate limiting on mutating endpoints. + +27. **Network egress restriction**: K8s NetworkPolicy for MPC nodes now restricts egress to peer nodes, postgres, valkey, DNS, and HTTPS (S3 backup) only. No unrestricted egress. + +28. **Compose.yml secrets**: JWT_SECRET now uses `${JWT_SECRET:?}` (required) instead of a hardcoded dev default. + +29. **Cluster API key forwarding**: api-deployment now passes `--cluster-api-key $(MPC_CLUSTER_API_KEY)` when proxying to internal MPC API. The `apiOnlyMPCBackend.doRequest` sends it as `Authorization: Bearer` header. + +### Consensus-Embedded Transport (Jan 2026) + +20. **ZAP Message Types**: MPC uses ZAP wire protocol message types 60-79: + - `MsgMPCBroadcast (60)` - Pub/sub broadcasts + - `MsgMPCDirect (61)` - Point-to-point messaging + - `MsgMPCReady (62)` - Peer registry readiness + - `MsgMPCKeygen (64)` - DKG protocol messages + - `MsgMPCSign (65)` - Signing protocol messages + - `MsgMPCReshare (66)` - Key resharing messages + - `MsgMPCResult (67)` - Session result messages + +21. **PoA Membership**: Ed25519 public keys are used as Proof-of-Authority validators. VoterIDs are derived via `SHA256("MPC/Ed25519" || pubkey)`. + +22. **State Replication**: Key metadata is replicated via consensus transport. Local BadgerDB stores encrypted key shares (not replicated for security). 
+ +## ๐ŸŒ Blockchain Support + +| Blockchain | Support | Curve | Protocol | +|------------|---------|-------|----------| +| Bitcoin (Legacy/SegWit) | โœ… Full | secp256k1 | CGGMP21/LSS | +| Bitcoin (Taproot) | โœ… Full | secp256k1 | FROST | +| Ethereum/EVM | โœ… Full | secp256k1 | CGGMP21/LSS | +| XRPL (XRP Ledger) | โœ… Full | secp256k1 | CGGMP21/LSS | +| Lux Network | โœ… Full | secp256k1 | CGGMP21/LSS | +| Polkadot/Kusama | โœ… Full | ristretto255 | FROST (SR25519) | +| Solana | โš ๏ธ Partial | Ed25519 | FROST (Taproot mode) | +| TON | โš ๏ธ Partial | Ed25519 | FROST (Taproot mode) | + +**Note**: Solana/TON use Ed25519 natively but our FROST implementation produces Taproot/BIP-340 signatures. Native Ed25519 support requires implementing the Ed25519 FROST variant. + +### SR25519 (Ristretto255/Schnorrkel) Implementation + +SR25519 threshold signing uses the generic FROST protocol over the ristretto255 prime-order group: +- **Keygen**: `frost.Keygen(Ristretto255{}, ...)` produces `*frost.Config` with ristretto255 curve types +- **Signing**: `frost.Sign(config, signers, message)` with signing context prepended to message +- **Signing context**: Default "substrate", configurable per-session (Schnorrkel convention) +- **Storage**: Key shares stored with `sr25519:` prefix in BadgerDB, serialized via CBOR +- **Signature format**: R (32 bytes, ristretto point) || z (32 bytes, ristretto scalar) = 64 bytes +- **Ristretto255 curve types**: Implemented locally in `pkg/mpc/ristretto255.go` using `gtank/ristretto255` library, satisfying the threshold library's `curve.Curve` interface. Will be replaced when threshold library publishes native Ristretto255 support. + +## ๐ŸŽฏ Best Practices + +1. **Always backup** BadgerDB before major operations +2. **Test locally** with 3-node setup before production +3. **Monitor health** via Consul UI (http://localhost:8500) +4. **Rotate keys** periodically using reshare functionality +5. 
**Use Age encryption** for production identities +6. **Keep logs** for debugging MPC rounds + +## Context for All AI Assistants + +This file (`LLM.md`) is symlinked as: +- `.AGENTS.md` +- `CLAUDE.md` +- `QWEN.md` +- `GEMINI.md` + +All files reference the same knowledge base. Updates here propagate to all AI systems. + +## Rules for AI Assistants + +1. **ALWAYS** update LLM.md with significant discoveries +2. **NEVER** commit symlinked files (.AGENTS.md, CLAUDE.md, etc.) - they're in .gitignore +3. **NEVER** create random summary files - update THIS file diff --git a/Makefile b/Makefile index c9fefa5..3b2af8a 100644 --- a/Makefile +++ b/Makefile @@ -1,21 +1,63 @@ -.PHONY: all build clean mpcium mpc +.PHONY: all build clean mpcd mpc test test-verbose test-coverage e2e-test e2e-clean cleanup-test-env test-all clean-all up down logs -BIN_DIR := bin +# Go 1.26 experimental features: +# runtimesecret - zeroes stack/register state after secret.Do() for forward secrecy +# simd - SIMD intrinsics (amd64/arm64) +GOEXPERIMENT ?= runtimesecret,simd -# Default target all: build -# Build both binaries -build: mpcium mpc +build: mpcd mpc -# Install mpcium (builds and places it in $GOBIN or $GOPATH/bin) -mpcium: - go install ./cmd/mpcium +mpcd: + GOWORK=off GOEXPERIMENT=$(GOEXPERIMENT) go build -o mpcd ./cmd/mpcd -# Install mpcium-cli mpc: - go install ./cmd/mpcium-cli + GOWORK=off GOEXPERIMENT=$(GOEXPERIMENT) go build -o mpc ./cmd/mpc -# Wipe out manually built binaries if needed (not required by go install) +# Run all tests (json1 enables SQLite JSON functions for ORM JSONB queries) +test: + CGO_ENABLED=1 GOEXPERIMENT=$(GOEXPERIMENT) go test -tags json1 ./... + +test-verbose: + CGO_ENABLED=1 GOEXPERIMENT=$(GOEXPERIMENT) go test -tags json1 -v ./... + +test-coverage: + CGO_ENABLED=1 GOEXPERIMENT=$(GOEXPERIMENT) go test -tags json1 -v -coverprofile=coverage.out ./... 
+ go tool cover -html=coverage.out -o coverage.html + +# E2E +e2e-test: build + @echo "Running E2E integration tests..." + cd e2e && make test + +e2e-test-coverage: build + @echo "Running E2E integration tests with coverage..." + cd e2e && make test-coverage + +e2e-clean: + @echo "Cleaning up E2E test artifacts..." + cd e2e && make clean + +cleanup-test-env: + @echo "Performing comprehensive test environment cleanup..." + cd e2e && ./cleanup_test_env.sh + +test-all: test e2e-test + +# Local dev environment (compose.yml) +up: + docker compose up -d + +down: + docker compose down + +logs: + docker compose logs -f + +# Clean clean: - rm -rf $(BIN_DIR) + rm -f mpcd mpc + rm -f coverage.out coverage.html + +clean-all: clean e2e-clean diff --git a/README.md b/README.md index 0d02530..bade3e0 100644 --- a/README.md +++ b/README.md @@ -1,164 +1,186 @@ -
+# Lux MPC -# mpcium: Resilient MPC (Multi-Party Computation) Nodes for Distributed Crypto Wallet Generation +Threshold signing engine -- CGGMP21 (ECDSA), FROST (EdDSA), BLS, and SR25519. No full private key reconstruction, ever. -> _"Setting up MPC wallets has always been painful, complex, and confusing. With MPCIUM, you can launch a secure MPC node cluster and generate wallets in minutes."_ +``` +go get github.com/luxfi/mpc +``` -

-

Join our Telegram community to discuss MPCIUM and Web3 cyber security!

+## Architecture -[![Go Version](https://img.shields.io/badge/Go-v1.23+-00ADD8?logo=go&style=for-the-badge)](https://go.dev/) -[![License](https://img.shields.io/github/license/fystack/mpcium?style=for-the-badge)](./LICENSE) -[![Go Report Card](https://goreportcard.com/badge/github.com/fystack/mpcium?style=for-the-badge)](https://goreportcard.com/report/github.com/fystack/mpcium) -[![Version](https://img.shields.io/github/v/release/fystack/mpcium?label=version&logo=semantic-release&style=for-the-badge)](https://github.com/fystack/mpcium/releases) -[![Telegram](https://img.shields.io/badge/Telegram-Community%20-26A5E4?logo=telegram&style=for-the-badge)](https://t.me/+IsRhPyWuOFxmNmM9) -[![Made by Fystack](https://img.shields.io/badge/Made%20by-Fystack-7D3DF4?style=for-the-badge)](https://fystack.io) +`luxfi/mpc` is a production MPC node (`mpcd`) that performs distributed key generation and threshold signing across a cluster. Each node holds a key share; `t` of `n` nodes must cooperate to produce a signature. The full private key never exists on any single machine, in memory or on disk. -
+### Protocols -Mpcium is a high-performance, open-source Multi-Party Computation (MPC) engine for securely generating and managing cryptographic wallets across distributed nodesโ€”without ever exposing the full private key. +| Protocol | Curve | Key Type | Use | +|----------|-------|----------|-----| +| CGGMP21 | secp256k1 | `secp256k1` | Bitcoin, Ethereum, Lux C-Chain, all EVM L2s, XRPL | +| FROST | Ed25519 | `ed25519` | Solana, TON, Polkadot, Cardano, Substrate chains | +| BLS | BLS12-381 | `bls` | Lux consensus, beacon chain, aggregated signatures | +| SR25519 | Ristretto255 | `sr25519` | Substrate/Polkadot native | -At its cryptographic core, Mpcium integrates tss-lib, a production-grade threshold signature scheme library developed by Binance. It supports: +### Threshold Model -- **ECDSA (secp256k1)**: Bitcoin, Ethereum, BNB, Polygon, and EVM-compatible L2 chains +``` +t >= floor(n/2) + 1 +``` -- **EdDSA (Ed25519)**: for Solana, Polkadot, Cardano, and other modern blockchains +A 2-of-3 cluster tolerates 1 compromised or offline node. A 3-of-5 cluster tolerates 2. The threshold is configurable at key generation time. -![Mpcium Architecture](images/mpcium.png) +### Transport Modes ---- +**Consensus (default)**: Peer-to-peer ZAP protocol with built-in PoA consensus. No external dependencies. Nodes discover each other via `--peer` flags. This is the production path. -## Resources +**Legacy**: NATS pub/sub + Consul service discovery. Deprecated but still supported via `--mode=legacy`. 
-- **MPC nodes architecture**: [MPC Fundamental and MPCIUM architecture](https://deepwiki.com/fystack/mpcium) -- **MPC clients**: - - [TypeScript Client](https://github.com/fystack/mpcium-client-ts) - - [Golang Client](https://github.com/fystack/mpcium/blob/master/pkg/client/client.go) +### Packages +``` +cmd/mpcd/ Daemon binary (CLI, API server, node lifecycle) +pkg/mpc/ Core MPC node -- session management, key generation, signing + keygen_session.go CGGMP21 distributed key generation + signing_session.go CGGMP21 threshold signing (secp256k1) + signing_session_frost.go FROST threshold signing (Ed25519) + bls_keygen_session.go BLS key generation + bls_signing_session.go BLS threshold signing + sr25519_keygen_session.go SR25519 key generation + sr25519_signing_session.go SR25519 threshold signing + reshare_session.go Key resharing (rotate shares without changing public key) + recovery.go Key share recovery + tfhe_session.go FHE threshold decryption sessions +pkg/api/ HTTP API (key generation, signing, key info, health) +pkg/transport/ P2P transport (consensus mode ZAP, legacy NATS) +pkg/messaging/ PubSub abstraction over transport layer +pkg/db/ Key share storage (SQLite default, PostgreSQL optional) +pkg/kvstore/ Encrypted key-value store (AES-256, age encryption) +pkg/keyinfo/ Key metadata management +pkg/identity/ Node identity (Ed25519 keypair, mutual authentication) +pkg/backup/ Encrypted periodic backups +pkg/encryption/ AES-256-GCM encryption for key material at rest +pkg/kms/ KMS integration for secret management +pkg/hsm/ HSM provider abstraction (env, AWS, GCP) +pkg/client/ Go client library for MPC API +pkg/event/ Event types (keygen, sign, reshare) +pkg/eventconsumer/ Event processing pipeline +pkg/protocol/ Wire protocol messages +pkg/settlement/ Trade settlement signing +pkg/smart/ Smart contract transaction construction +pkg/custody/ Custody policy engine +pkg/integrity/ Key share integrity verification +pkg/txtracker/ Transaction lifecycle tracking 
+``` -![All node ready](images/all-node-ready.png) +### Security Properties -## ๐Ÿ“ฆ Dependencies Overview +- Key shares encrypted at rest with AES-256-GCM (key from HSM/KMS) +- Inter-node messages authenticated with Ed25519 signatures +- No key share leaves the node unencrypted +- Resharing rotates shares without changing the public key or requiring all nodes +- Backup files encrypted with age (modern, audited encryption) +- Secret key bytes zeroed from memory after use (`pkg/mpc/secret.go`) -| Dependency | Purpose | -| --------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | -| [NATS](https://nats.io) | Lightweight and resilient **messaging layer** for coordinating MPC nodes in real time. Enables pub/sub communication even under partial failure. | -| [Badger KV](https://github.com/dgraph-io/badger) | High-performance **embedded key-value store** used for local encrypted storage of MPC key shares and session data. | -| [Consul](https://www.consul.io) | **Service discovery and health checking** to allow nodes to dynamically find each other and maintain cluster integrity. | -| [tss-lib](https://github.com/binance-chain/tss-lib) | Cryptographic engine for **threshold key generation and signing**, supporting ECDSA and EdDSA (used in Bitcoin, Ethereum, Solana, etc). | -| [age](https://github.com/FiloSottile/age) | **Modern encryption tool** used for secure key material storage and protection with password-based encryption. | +## Quick Start -## Threshold & Nodes +### Consensus Mode (recommended) -Mpcium uses a **t-of-n threshold scheme** to securely generate and sign with private keys. 
+```bash +# Node 0 +mpcd start --node-id node0 --listen :9651 --api :9800 \ + --threshold 2 --peer node1:9651 --peer node2:9651 -- `n` = total number of MPC nodes (key shares) -- `t` = minimum number of nodes required to sign +# Node 1 +mpcd start --node-id node1 --listen :9652 --api :9801 \ + --threshold 2 --peer node0:9651 --peer node2:9651 -Only `t` out of `n` nodes need to participate โ€” the full private key is never reconstructed. +# Node 2 +mpcd start --node-id node2 --listen :9653 --api :9802 \ + --threshold 2 --peer node0:9651 --peer node1:9651 +``` -To maintain security against compromised nodes, Mpcium enforces: +### Generate a Wallet +```bash +curl -X POST http://localhost:9800/v1/keygen \ + -H "Content-Type: application/json" \ + -d '{"wallet_id": "w-001", "key_type": "secp256k1"}' ``` -t โ‰ฅ โŒŠn / 2โŒ‹ + 1 -``` - -### Example: 2-of-3 Threshold -- โœ… `node0 + node1` โ†’ signs successfully -- โœ… `node1 + node2` โ†’ signs successfully -- โŒ `node0` alone โ†’ not enough shares +### Sign a Transaction -This ensures: +```bash +curl -X POST http://localhost:9800/v1/sign \ + -H "Content-Type: application/json" \ + -d '{"wallet_id": "w-001", "message": "0xdeadbeef...", "key_type": "secp256k1"}' +``` -- No single point of compromise -- Fault tolerance if some nodes go offline -- Configurable security by adjusting `t` and `n` +### Go Client -## Architecture +```go +import "github.com/luxfi/mpc/pkg/client" -### Overview +c := client.New("http://localhost:9800") +result, err := c.CreateWallet("w-001", "secp256k1") +sig, err := c.Sign("w-001", txHash, "secp256k1") +``` -Each Mpcium node: +## Configuration -- Holds a **key share** in local AES-256 encrypted storage (via Badger KV) -- Participates in **threshold signing** using `tss-lib` -- Communicates over a **resilient messaging layer** using NATS -- Registers itself with **Consul** for service discovery and health checks -- Verifies incoming messages using **Ed25519-based mutual authentication** 
+`config.yaml` or environment variables (`LUX_MPC_*`): -### Message Flow & Signature Verification +```yaml +mode: consensus +environment: local # mainnet | testnet | local +mpc_threshold: 2 +max_concurrent_keygen: 2 +db_path: "." # SQLite (default), or postgres:// URL +backup_enabled: true +backup_period_seconds: 300 +backup_dir: backups +``` -1. A signing request is broadcast to the MPC cluster through **NATS** as an authenticated event. Each node **verifies the sender's Ed25519 signature** before processing the request. -2. NATS broadcasts the request to the MPC nodes. -3. Each participating node verifies: - - The **signature** of the sender (Ed25519) - - The **authenticity** of the message (non-replayable, unique session) -4. If the node is healthy and within the quorum (`t`), it: - - Computes a partial signature using its share - - Publishes the result back via NATS -5. Once `t` partial signatures are received, they are aggregated into a full signature. +All secrets are sourced from KMS via `--hsm-provider=env|aws|gcp`. No plaintext secrets in config. ---- +## Deployment -### Properties +### Docker Compose -- **No single point of compromise**: Keys are never fully assembled -- **Byzantine-resilient**: Only `t` of `n` nodes are required to proceed -- **Scalable and pluggable**: Easily expand the cluster or integrate additional tools -- **Secure peer authentication**: All inter-node messages are signed and verified using Ed25519 +```bash +docker compose up +``` -## Installation and Run +### Kubernetes -For full installation and run instructions, see [INSTALLATION.md](./INSTALLATION.md). +```bash +cd k8s && kubectl kustomize . | kubectl apply -f - +``` -## Preview usage +K8s manifests are in `k8s/` with Kustomize overlays. Production deployment uses `cloudbuild.yaml` for CI/CD to GHCR. -### Start nodes +## Testing -```shell -$ mpcium start -n node0 -$ mpcium start -n node1 -$ mpcium start -n node2 +```bash +go test ./... 
# 331 test functions +# End-to-end (3-node cluster) +cd e2e && make test ``` -### Client Implementations +## Papers -- **Go**: Available in the `pkg/client` directory. Check the `examples` folder for usage samples. -- **TypeScript**: Available at [github.com/fystack/mpcium-client-ts](https://github.com/fystack/mpcium-client-ts) +- [Lux Threshold MPC](https://github.com/luxfi/papers/blob/main/lux-threshold-mpc.pdf) -- protocol specification, security proofs +- [Lux LSS MPC](https://github.com/luxfi/papers/blob/main/lux-lss-mpc.pdf) -- linear secret sharing in MPC +- [Lux Validator MPC](https://github.com/luxfi/papers/blob/main/lux-validator-mpc.pdf) -- MPC integration with validator nodes +- [Lux HSM Boundary](https://github.com/luxfi/papers/blob/main/lux-hsm-boundary.pdf) -- HSM trust boundary analysis +- [Lux FHE MPC Hybrid](https://github.com/luxfi/papers/blob/main/lux-fhe-mpc-hybrid.pdf) -- FHE threshold decryption via MPC +- [Lux M-Chain MPC](https://github.com/luxfi/papers/blob/main/lux-mchain-mpc.pdf) -- MPC chain architecture -### Client +## Dependencies -```go +- [`luxfi/threshold`](https://github.com/luxfi/threshold) -- CGGMP21 and FROST protocol implementations +- [`luxfi/fhe`](https://github.com/luxfi/fhe) -- FHE threshold decryption sessions +- [`luxfi/hsm`](https://github.com/luxfi/hsm) -- HSM abstraction layer +- [`hanzoai/base`](https://github.com/hanzoai/base) -- Application framework -import ( - "github.com/fystack/mpcium/client" - "github.com/nats-io/nats.go" -) - - -func main () { - natsConn, err := nats.Connect(natsURL) - if err != nil { - logger.Fatal("Failed to connect to NATS", err) - } - defer natsConn.Close() - mpcClient := client.NewMPCClient(client.Options{ - NatsConn: natsConn, - KeyPath: "./event_initiator.key", - }) - err = mpcClient.OnWalletCreationResult(func(event mpc.KeygenSuccessEvent) { - logger.Info("Received wallet creation result", "event", event) - }) - if err != nil { - logger.Fatal("Failed to subscribe to wallet-creation 
results", err) - } - - walletID := uuid.New().String() - if err := mpcClient.CreateWallet(walletID); err != nil { - logger.Fatal("CreateWallet failed", err) - } - logger.Info("CreateWallet sent, awaiting result...", "walletID", walletID) -} -``` +## License + +Lux Ecosystem License v1.2. See [LICENSE](LICENSE). diff --git a/SECURITY.md b/SECURITY.md index 3333a75..647f8c5 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,11 +1,11 @@ -# Mpcium Security Model +# Lux MPC Security Model ## Core Security Principles -Mpcium implements a threshold signature scheme with industry-standard security practices to protect cryptographic operations: +Lux MPC implements a threshold signature scheme with industry-standard security practices to protect cryptographic operations: 1. **Distributed Trust**: No single entity possesses complete private keys -2. **Threshold Cryptography**: Requires t+1 nodes to participate in signing operations +2. **Threshold Cryptography**: Requires t-out-of-n nodes to participate in signing operations 3. **End-to-End Verification**: All communications are signed and verified 4. 
**Defense in Depth**: Multiple layers of encryption and verification @@ -16,14 +16,14 @@ Mpcium implements a threshold signature scheme with industry-standard security p - **Ed25519 Keypairs**: Each node possesses a unique Ed25519 keypair for identity - **Identity Generation**: Secure identity creation with the `generate-identity` command: ``` - go run cmd/mpcium-cli/main.go generate-identity --node=node0 --peers=peers.json --encrypt + go run cmd/lux-mpc-cli/main.go generate-identity --node=node0 --peers=peers.json --encrypt ``` - **Metadata Tracking**: Each identity includes creation information, machine details, and timestamps - **Identity Verification**: All operations require cryptographic proof of identity ### Message Authentication -Every message in the Mpcium network undergoes rigorous verification: +Every message in the Lux MPC network undergoes rigorous verification: ```go // Messages are signed before transmission diff --git a/cloudbuild.yaml b/cloudbuild.yaml new file mode 100644 index 0000000..1b4305e --- /dev/null +++ b/cloudbuild.yaml @@ -0,0 +1,43 @@ +steps: + - id: build-mpcd-mpc + name: golang:1.26-alpine + entrypoint: sh + args: + - -c + - | + apk add --no-cache gcc musl-dev git + git config --global url."https://${_GITHUB_TOKEN}@github.com/".insteadOf "https://github.com/" + export GOPRIVATE=github.com/luxfi/*,github.com/hanzoai/* + CGO_ENABLED=0 go build -ldflags="-s -w" -o /workspace/mpcd ./cmd/mpcd + CGO_ENABLED=0 go build -ldflags="-s -w" -o /workspace/mpc ./cmd/mpc + + - id: image + name: gcr.io/cloud-builders/docker + entrypoint: sh + args: + - -c + - | + cat > /workspace/Dockerfile.slim <<'DOCKER' + FROM alpine:3.21 + RUN apk add --no-cache ca-certificates tzdata + WORKDIR /app + COPY mpcd /usr/local/bin/mpcd + COPY mpc /usr/local/bin/mpc + EXPOSE 8081 9651 9800 + ENTRYPOINT ["mpcd"] + DOCKER + docker build -t us-docker.pkg.dev/onyxplus-devnet/backend/mpc:v1.1.4 -f /workspace/Dockerfile.slim /workspace + waitFor: [build-mpcd-mpc] + + - id: 
push + name: gcr.io/cloud-builders/docker + args: [push, us-docker.pkg.dev/onyxplus-devnet/backend/mpc:v1.1.4] + waitFor: [image] + +substitutions: + _GITHUB_TOKEN: '' + +options: + machineType: E2_HIGHCPU_8 + logging: CLOUD_LOGGING_ONLY +timeout: 900s diff --git a/cmd/mpcium-cli/generate-identity.go b/cmd/mpc/generate-identity.go similarity index 90% rename from cmd/mpcium-cli/generate-identity.go rename to cmd/mpc/generate-identity.go index dc29af7..bea6e1a 100644 --- a/cmd/mpcium-cli/generate-identity.go +++ b/cmd/mpc/generate-identity.go @@ -15,6 +15,8 @@ import ( "filippo.io/age" "github.com/urfave/cli/v3" "golang.org/x/term" + + "github.com/luxfi/mpc/pkg/common/pathutil" ) // Identity structure (for identity.json) @@ -33,7 +35,7 @@ func requestPassword() (string, error) { // First password entry fmt.Print("Enter passphrase to encrypt private key: ") - bytePassword, err := term.ReadPassword(int(syscall.Stdin)) + bytePassword, err := term.ReadPassword(syscall.Stdin) fmt.Println() // newline after prompt if err != nil { return "", fmt.Errorf("failed to read passphrase: %w", err) @@ -42,7 +44,7 @@ func requestPassword() (string, error) { // Confirm password fmt.Print("Confirm passphrase: ") - byteConfirmation, err := term.ReadPassword(int(syscall.Stdin)) + byteConfirmation, err := term.ReadPassword(syscall.Stdin) fmt.Println() // newline after prompt if err != nil { return "", fmt.Errorf("failed to read confirmation passphrase: %w", err) @@ -92,6 +94,11 @@ func generateIdentity(ctx context.Context, c *cli.Command) error { return fmt.Errorf("error checking peers file: %w", err) } + // Validate the peers file path for security + if err := pathutil.ValidateFilePath(peersPath); err != nil { + return fmt.Errorf("invalid peers file path: %w", err) + } + // Read peers file peersData, err := os.ReadFile(peersPath) if err != nil { @@ -111,7 +118,7 @@ func generateIdentity(ctx context.Context, c *cli.Command) error { } // Create identity directory - if err := 
os.MkdirAll(identityDir, 0755); err != nil { + if err := os.MkdirAll(identityDir, 0750); err != nil { return fmt.Errorf("failed to create identity directory: %w", err) } @@ -152,7 +159,7 @@ func generateNodeIdentity(nodeName, nodeID, identityDir string, encrypt bool, pa if err != nil { return fmt.Errorf("failed to marshal identity: %w", err) } - if err := os.WriteFile(identityPath, identityBytes, 0644); err != nil { + if err := os.WriteFile(identityPath, identityBytes, 0600); err != nil { return fmt.Errorf("failed to write identity JSON: %w", err) } @@ -168,6 +175,11 @@ func generateNodeIdentity(nodeName, nodeID, identityDir string, encrypt bool, pa return fmt.Errorf("encrypted key file %s already exists. Use --overwrite to force", encryptedKeyPath) } + // Validate the encrypted key path for security + if err := pathutil.ValidateFilePath(encryptedKeyPath); err != nil { + return fmt.Errorf("invalid encrypted key file path: %w", err) + } + // Encrypt with age and passphrase outFile, err := os.Create(encryptedKeyPath) if err != nil { diff --git a/cmd/mpcium-cli/generate-initiator.go b/cmd/mpc/generate-initiator.go similarity index 92% rename from cmd/mpcium-cli/generate-initiator.go rename to cmd/mpc/generate-initiator.go index 3710430..a3ada0c 100644 --- a/cmd/mpcium-cli/generate-initiator.go +++ b/cmd/mpc/generate-initiator.go @@ -15,6 +15,8 @@ import ( "filippo.io/age" "github.com/urfave/cli/v3" + + "github.com/luxfi/mpc/pkg/common/pathutil" ) // Identity struct to store node metadata @@ -34,7 +36,7 @@ func generateInitiatorIdentity(ctx context.Context, c *cli.Command) error { overwrite := c.Bool("overwrite") // Create output directory if it doesn't exist - if err := os.MkdirAll(outputDir, 0755); err != nil { + if err := os.MkdirAll(outputDir, 0750); err != nil { return fmt.Errorf("failed to create output directory: %w", err) } @@ -97,7 +99,7 @@ func generateInitiatorIdentity(ctx context.Context, c *cli.Command) error { return fmt.Errorf("failed to marshal identity 
JSON: %w", err) } - if err := os.WriteFile(identityPath, identityBytes, 0643); err != nil { + if err := os.WriteFile(identityPath, identityBytes, 0600); err != nil { return fmt.Errorf("failed to save identity file: %w", err) } @@ -111,6 +113,12 @@ func generateInitiatorIdentity(ctx context.Context, c *cli.Command) error { // Create encrypted key file encKeyPath := keyPath + ".age" + + // Validate the encrypted key path for security + if err := pathutil.ValidateFilePath(encKeyPath); err != nil { + return fmt.Errorf("invalid encrypted key file path: %w", err) + } + outFile, err := os.Create(encKeyPath) if err != nil { return fmt.Errorf("failed to create encrypted private key file: %w", err) diff --git a/cmd/mpcium-cli/generate-peers.go b/cmd/mpc/generate-peers.go similarity index 92% rename from cmd/mpcium-cli/generate-peers.go rename to cmd/mpc/generate-peers.go index b91c936..9a9cd96 100644 --- a/cmd/mpcium-cli/generate-peers.go +++ b/cmd/mpc/generate-peers.go @@ -33,7 +33,7 @@ func generatePeers(ctx context.Context, c *cli.Command) error { // Create directory if it doesn't exist dir := filepath.Dir(outputPath) if dir != "." 
{ - if err := os.MkdirAll(dir, 0755); err != nil { + if err := os.MkdirAll(dir, 0750); err != nil { return fmt.Errorf("failed to create directory: %w", err) } } @@ -56,7 +56,7 @@ func generatePeers(ctx context.Context, c *cli.Command) error { } // Write to file - if err := os.WriteFile(outputPath, peersJSON, 0644); err != nil { + if err := os.WriteFile(outputPath, peersJSON, 0600); err != nil { return fmt.Errorf("failed to write file: %w", err) } diff --git a/cmd/mpcium-cli/main.go b/cmd/mpc/main.go similarity index 81% rename from cmd/mpcium-cli/main.go rename to cmd/mpc/main.go index f2b9912..25c2417 100644 --- a/cmd/mpcium-cli/main.go +++ b/cmd/mpc/main.go @@ -16,8 +16,8 @@ const ( func main() { cmd := &cli.Command{ - Name: "mpcium", - Usage: "Fystack MPC node management tools", + Name: "mpc", + Usage: "MPC node management tools", Commands: []*cli.Command{ { Name: "generate-peers", @@ -125,11 +125,36 @@ func main() { }, Action: generateInitiatorIdentity, }, + { + Name: "recover", + Usage: "Recover database from encrypted backup files", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "backup-dir", + Aliases: []string{"b"}, + Usage: "Directory containing encrypted backup files", + Required: true, + }, + &cli.StringFlag{ + Name: "recovery-path", + Aliases: []string{"r"}, + Usage: "Target path for database recovery", + Required: true, + }, + &cli.BoolFlag{ + Name: "force", + Aliases: []string{"f"}, + Value: false, + Usage: "Force overwrite if recovery path already exists", + }, + }, + Action: recoverDatabase, + }, { Name: "version", Usage: "Display detailed version information", Action: func(ctx context.Context, c *cli.Command) error { - fmt.Printf("mpcium-cli version %s\n", VERSION) + fmt.Printf("mpc version %s\n", VERSION) return nil }, }, diff --git a/cmd/mpc/recovery.go b/cmd/mpc/recovery.go new file mode 100644 index 0000000..cdac3c3 --- /dev/null +++ b/cmd/mpc/recovery.go @@ -0,0 +1,64 @@ +package main + +import ( + "context" + "fmt" + "os" + "syscall" + + 
"github.com/urfave/cli/v3" + "golang.org/x/term" + + "github.com/luxfi/mpc/pkg/kvstore" +) + +// recoverDatabase handles the database recovery from encrypted backup files +func recoverDatabase(ctx context.Context, c *cli.Command) error { + backupDir := c.String("backup-dir") + recoveryPath := c.String("recovery-path") + force := c.Bool("force") + + if _, err := os.Stat(backupDir); os.IsNotExist(err) { + return fmt.Errorf("backup directory does not exist: %s", backupDir) + } + + if _, err := os.Stat(recoveryPath); err == nil && !force { + return fmt.Errorf("recovery path already exists: %s (use --force to overwrite)", recoveryPath) + } + + // Prompt for encryption key + var key []byte + fmt.Print("Enter backup encryption key: ") + keyBytes, err := term.ReadPassword(syscall.Stdin) + if err != nil { + return fmt.Errorf("failed to read encryption key: %w", err) + } + fmt.Println() // Add newline after password input + key = keyBytes + if len(key) == 0 { + return fmt.Errorf("encryption key cannot be empty") + } + + // Remove existing recovery path if force flag is set + if force { + if err := os.RemoveAll(recoveryPath); err != nil { + return fmt.Errorf("failed to remove existing recovery path: %w", err) + } + } + + fmt.Printf("Starting database recovery...\n") + fmt.Printf("Backup directory: %s\n", backupDir) + fmt.Printf("Recovery path: %s\n", recoveryPath) + + // Create a temporary backup executor to access the backup files + tempExecutor := kvstore.NewBackup("temp", nil, key, backupDir) + + // Perform the recovery using the existing method with specified recovery path + if err := tempExecutor.RestoreAllBackupsEncrypted(recoveryPath, key); err != nil { + return fmt.Errorf("recovery failed: %w", err) + } + + fmt.Printf("✅ Database recovery completed successfully!\n") + fmt.Printf("Restored database is available at: %s\n", recoveryPath) + return nil +} diff --git a/cmd/mpcium-cli/register-peers.go b/cmd/mpc/register-peers.go similarity index 83% rename from
cmd/mpcium-cli/register-peers.go rename to cmd/mpc/register-peers.go index d19d0a2..700306c 100644 --- a/cmd/mpcium-cli/register-peers.go +++ b/cmd/mpc/register-peers.go @@ -6,11 +6,13 @@ import ( "fmt" "os" - "github.com/fystack/mpcium/pkg/config" - "github.com/fystack/mpcium/pkg/infra" - "github.com/fystack/mpcium/pkg/logger" "github.com/hashicorp/consul/api" "github.com/urfave/cli/v3" + + "github.com/luxfi/mpc/pkg/common/pathutil" + "github.com/luxfi/mpc/pkg/config" + "github.com/luxfi/mpc/pkg/infra" + "github.com/luxfi/mpc/pkg/logger" ) func registerPeers(ctx context.Context, c *cli.Command) error { @@ -20,6 +22,11 @@ func registerPeers(ctx context.Context, c *cli.Command) error { // Hardcoded prefix for MPC peers in Consul prefix := "mpc_peers/" + // Validate the input file path for security + if err := pathutil.ValidateFilePath(inputPath); err != nil { + return fmt.Errorf("invalid input file path: %w", err) + } + // Check if input file exists if _, err := os.Stat(inputPath); os.IsNotExist(err) { return fmt.Errorf("input file %s does not exist", inputPath) diff --git a/cmd/mpcium-cli/utils.go b/cmd/mpc/utils.go similarity index 100% rename from cmd/mpcium-cli/utils.go rename to cmd/mpc/utils.go diff --git a/cmd/mpcd/main.go b/cmd/mpcd/main.go new file mode 100644 index 0000000..0c716b7 --- /dev/null +++ b/cmd/mpcd/main.go @@ -0,0 +1,1355 @@ +package main + +import ( + "context" + "crypto/ed25519" + crypto_elliptic "crypto/elliptic" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "math/big" + "net" + "net/http" + "os" + "os/signal" + "path/filepath" + "strings" + "sync" + "syscall" + "time" + + "golang.org/x/crypto/ripemd160" + "golang.org/x/crypto/sha3" + + "github.com/mr-tron/base58" + + "github.com/nats-io/nats.go" + "github.com/spf13/viper" + "github.com/urfave/cli/v3" + + "github.com/hanzoai/base" + "github.com/hanzoai/base/apis" + "github.com/hanzoai/base/core" + "github.com/luxfi/hsm" + + uimpc "github.com/luxfi/mpc/ui" + + mpcapi 
"github.com/luxfi/mpc/pkg/api" + "github.com/luxfi/mpc/pkg/backup" + "github.com/luxfi/mpc/pkg/db" + "github.com/luxfi/mpc/pkg/event" + "github.com/luxfi/mpc/pkg/eventconsumer" + "github.com/luxfi/mpc/pkg/keyinfo" + "github.com/luxfi/mpc/pkg/kvstore" + "github.com/luxfi/mpc/pkg/logger" + "github.com/luxfi/mpc/pkg/messaging" + "github.com/luxfi/mpc/pkg/mpc" + "github.com/luxfi/mpc/pkg/transport" + "github.com/luxfi/mpc/pkg/types" +) + +const ( + Version = "0.3.3" + DefaultBackupPeriodSeconds = 300 // (5 minutes) +) + +func main() { + app := &cli.Command{ + Name: "mpcd", + Usage: "MPC daemon for threshold signatures (consensus-embedded)", + Version: Version, + Commands: []*cli.Command{ + { + Name: "start", + Usage: "Start a Lux MPC node", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "node-id", + Usage: "Node ID", + }, + &cli.StringFlag{ + Name: "listen", + Usage: "P2P listen address", + Value: ":9651", + }, + &cli.StringFlag{ + Name: "api", + Usage: "Internal API listen address", + Value: ":9800", + }, + &cli.StringFlag{ + Name: "data", + Usage: "Data directory", + }, + &cli.StringFlag{ + Name: "keys", + Usage: "Keys directory", + }, + &cli.IntFlag{ + Name: "threshold", + Aliases: []string{"t"}, + Usage: "Signing threshold", + Value: 2, + }, + &cli.StringSliceFlag{ + Name: "peer", + Usage: "Peer address (can be specified multiple times)", + }, + &cli.StringFlag{ + Name: "log-level", + Usage: "Log level (debug, info, warn, error)", + Value: "info", + }, + &cli.StringFlag{ + Name: "api-listen", + Usage: "Dashboard API listen address", + Value: ":8081", + }, + &cli.StringFlag{ + Name: "jwt-secret", + Usage: "JWT signing secret for dashboard auth", + }, + // HSM / password provider flags + &cli.StringFlag{ + Name: "hsm-provider", + Usage: "Password provider type: aws|gcp|azure|env|file (default: env)", + Sources: cli.EnvVars("MPC_HSM_PROVIDER"), + Value: "env", + }, + &cli.StringFlag{ + Name: "hsm-key-id", + Usage: "HSM key ARN/name/path for ZapDB password 
decryption", + Sources: cli.EnvVars("MPC_HSM_KEY_ID"), + }, + // HSM signer flags (for co-signing) + &cli.StringFlag{ + Name: "hsm-signer", + Usage: "Signer provider for intent co-signing: aws|gcp|azure|zymbit|mldsa|local (default: local)", + Sources: cli.EnvVars("MPC_HSM_SIGNER"), + Value: "local", + }, + &cli.StringFlag{ + Name: "hsm-signer-key-id", + Usage: "HSM signer key ARN/name for co-signing operations", + Sources: cli.EnvVars("MPC_HSM_SIGNER_KEY_ID"), + }, + &cli.BoolFlag{ + Name: "hsm-attest", + Usage: "Enable HSM attestation on threshold signature shares (binds shares to hardware)", + Sources: cli.EnvVars("MPC_HSM_ATTEST"), + Value: false, + }, + &cli.BoolFlag{ + Name: "debug", + Usage: "Enable debug logging", + Value: false, + }, + }, + Action: func(ctx context.Context, c *cli.Command) error { + return runNodeConsensus(ctx, c) + }, + }, + { + Name: "version", + Usage: "Display detailed version information", + Action: func(ctx context.Context, c *cli.Command) error { + fmt.Printf("mpcd version %s\n", Version) + return nil + }, + }, + }, + } + + if err := app.Run(context.Background(), os.Args); err != nil { + fmt.Println(err) + os.Exit(1) + } +} + +// checkRequiredConfigValues verifies required viper config values are present. +// skipPasswordCheck should be true when ZapDB password will be resolved via HSM provider. +func checkRequiredConfigValues(skipPasswordCheck bool) { + // Show warning if we're using file-based config but no password is set + if !skipPasswordCheck && viper.GetString("zapdb_password") == "" { + logger.Fatal("ZapDB password is required", nil) + } + + if viper.GetString("event_initiator_pubkey") == "" { + logger.Fatal("Event initiator public key is required", nil) + } +} + +// resolveZapDBPassword returns the ZapDB encryption password using the HSM +// password provider infrastructure. Provider type comes from --hsm-provider +// (or MPC_HSM_PROVIDER env var), defaulting to "env" for backward compat. 
+// When a cloud provider (aws/gcp/azure) is configured, the password is +// decrypted from ZAPDB_ENCRYPTED_PASSWORD via the cloud KMS at startup. +func resolveZapDBPassword(ctx context.Context, c *cli.Command) string { + hsmProviderType := c.String("hsm-provider") + hsmKeyID := c.String("hsm-key-id") + environment := viper.GetString("environment") + + // In production, env/file password providers are rejected. ZapDB passwords + // must come from a cloud HSM (aws/gcp/azure) so they aren't readable via kubectl exec. + if environment == "production" && (hsmProviderType == "" || hsmProviderType == "env" || hsmProviderType == "file") { + logger.Fatal("Production requires cloud HSM password provider (aws/gcp/azure); env/file providers are not permitted", + fmt.Errorf("MPC_HSM_PROVIDER=%q", hsmProviderType)) + } + + provider, err := hsm.NewPasswordProvider(hsmProviderType, nil) + if err != nil { + logger.Fatal("Failed to create HSM password provider", fmt.Errorf("provider=%s: %w", hsmProviderType, err)) + } + + password, err := provider.GetPassword(ctx, hsmKeyID) + if err != nil { + if environment == "production" { + logger.Fatal("HSM provider failed in production; cannot fall back to config", + fmt.Errorf("provider=%s: %w", hsmProviderType, err)) + } + // Fall back to viper config for dev/staging only + password = viper.GetString("zapdb_password") + if password == "" { + logger.Fatal("ZapDB password is required: HSM provider failed and no zapdb_password in config", + fmt.Errorf("provider=%s: %w", hsmProviderType, err)) + } + logger.Warn("ZapDB password loaded from config (non-production only)", "environment", environment) + return password + } + + logger.Info("ZapDB password loaded via HSM provider", "provider", hsmProviderType) + return password +} + +func NewZapKV(nodeName, nodeID, password string) *kvstore.Store { + // ZapDB KV store + // Use configured db_path or default to current directory + "db" + basePath := viper.GetString("db_path") + if basePath == "" { + 
basePath = filepath.Join(".", "db") + } + dbPath := filepath.Join(basePath, nodeName) + + // Use configured backup_dir or default to current directory + "backups" + backupDir := viper.GetString("backup_dir") + if backupDir == "" { + backupDir = filepath.Join(".", "backups") + } + + // Create ZapDB config + config := kvstore.Config{ + NodeID: nodeName, + Key: []byte(password), + BackupKey: []byte(password), // Using same key for backup encryption + Dir: backupDir, + Path: dbPath, + } + + kv, err := kvstore.New(config) + if err != nil { + logger.Fatal("Failed to create zapdb store", err) + } + logger.Info("Connected to zapdb store", "path", dbPath, "backup_dir", backupDir) + return kv +} + +func StartPeriodicBackup(ctx context.Context, zapKV *kvstore.Store, periodSeconds int) func() { + if periodSeconds <= 0 { + periodSeconds = DefaultBackupPeriodSeconds + } + backupTicker := time.NewTicker(time.Duration(periodSeconds) * time.Second) + backupCtx, backupCancel := context.WithCancel(ctx) + go func() { + for { + select { + case <-backupCtx.Done(): + logger.Info("Backup background job stopped") + return + case <-backupTicker.C: + logger.Info("Running periodic ZapDB backup...") + err := zapKV.Backup() + if err != nil { + logger.Error("Periodic ZapDB backup failed", err) + } else { + logger.Info("Periodic ZapDB backup completed successfully") + } + } + } + }() + return backupCancel +} + +// runNodeConsensus runs the MPC node with consensus-embedded transport +func runNodeConsensus(ctx context.Context, c *cli.Command) error { + nodeID := c.String("node-id") + listenAddr := c.String("listen") + dataDir := c.String("data") + keysDir := c.String("keys") + threshold := c.Int("threshold") + peers := c.StringSlice("peer") + logLevel := c.String("log-level") + debug := c.Bool("debug") + + if nodeID == "" { + return fmt.Errorf("--node-id is required in consensus mode") + } + if dataDir == "" { + return fmt.Errorf("--data is required in consensus mode") + } + + // Initialize logger 
+ logger.Init("consensus", debug || logLevel == "debug") + logger.Info("Starting MPC node in consensus mode", + "nodeID", nodeID, + "listen", listenAddr, + "dataDir", dataDir, + "threshold", threshold, + "peers", len(peers), + ) + + // Ensure directories exist + if err := os.MkdirAll(dataDir, 0750); err != nil { + return fmt.Errorf("failed to create data directory: %w", err) + } + if keysDir == "" { + keysDir = filepath.Join(dataDir, "keys") + } + if err := os.MkdirAll(keysDir, 0700); err != nil { + return fmt.Errorf("failed to create keys directory: %w", err) + } + + // Load or generate identity + privKey, pubKey, err := loadOrGenerateIdentity(keysDir, nodeID) + if err != nil { + return fmt.Errorf("failed to load/generate identity: %w", err) + } + + // Create consensus identity store for verifying messages + consensusIdentity := NewConsensusIdentityStore(nodeID, privKey, pubKey) + + // Build peer map + peerMap := make(map[string]string) + peerMap[nodeID] = listenAddr + for i, peer := range peers { + // Parse peer address - format: "nodeID@host:port" or just "host:port" + parts := strings.SplitN(peer, "@", 2) + if len(parts) == 2 { + peerMap[parts[0]] = parts[1] + } else { + peerMap[fmt.Sprintf("peer-%d", i)] = peer + } + } + + // Get ZapDB password via HSM provider (supports AWS KMS, GCP Cloud KMS, Azure Key Vault, env, file) + zapDBPassword := resolveZapDBPassword(ctx, c) + + // Create transport factory (uses ZapDB for embedded key-share storage) + factoryCfg := transport.FactoryConfig{ + NodeID: nodeID, + ListenAddr: listenAddr, + Peers: peerMap, + PrivateKey: privKey, + PublicKey: pubKey, + ZapDBPath: filepath.Join(dataDir, "db"), + ZapDBPassword: zapDBPassword, + BackupDir: filepath.Join(dataDir, "backups"), + } + + factory, err := transport.NewFactory(factoryCfg) + if err != nil { + return fmt.Errorf("failed to create transport factory: %w", err) + } + + // Start transport + if err := factory.Start(ctx); err != nil { + return fmt.Errorf("failed to start 
transport: %w", err) + } + defer factory.Stop() + + // Create MPC node with consensus transport + peerIDs := make([]string, 0, len(peerMap)-1) + for id := range peerMap { + if id != nodeID { + peerIDs = append(peerIDs, id) + } + } + + // Create PubSub adapter for messaging + pubSub := NewConsensusPubSubAdapter(factory.PubSub()) + + // Create message queue adapters + genKeyResultQueue := NewConsensusMessageQueue(factory.Transport(), nodeID, "keygen") + signingResultQueue := NewConsensusMessageQueue(factory.Transport(), nodeID, "signing") + reshareResultQueue := NewConsensusMessageQueue(factory.Transport(), nodeID, "reshare") + + logger.Info("Node is running in consensus mode", "nodeID", nodeID) + + // Create peer registry using consensus membership + peerRegistry := NewConsensusPeerRegistry(factory.Registry(), nodeID, peerIDs) + + // Create MPC node + mpcNode := mpc.NewNode( + nodeID, + peerIDs, + pubSub, + factory.KVStore(), + NewConsensusKeyInfoStore(factory.KeyInfoStore(), peerRegistry), + peerRegistry, + consensusIdentity, + ) + + // Create event consumer + eventConsumer := eventconsumer.NewEventConsumer( + mpcNode, + pubSub, + genKeyResultQueue, + signingResultQueue, + reshareResultQueue, + consensusIdentity, + ) + eventConsumer.Run() + defer eventConsumer.Close() + + // Mark as ready + if err := peerRegistry.Ready(); err != nil { + logger.Error("Failed to mark peer registry as ready", err) + } + logger.Info("[READY] Node is ready (consensus mode)", "nodeID", nodeID) + + // Start HTTP API server (internal MPC node API on port 9800) + apiAddr := c.String("api") + if apiAddr != "" { + // Internal API bearer token — required for all endpoints except /healthz. + // Source: MPC_INTERNAL_API_KEY env var. In production the StatefulSet + // injects this from the KMS-synced mpc-secrets K8s Secret.
+ internalAPIKey := os.Getenv("MPC_INTERNAL_API_KEY") + if internalAPIKey == "" { + // Derive a deterministic key from the node's Ed25519 private key so + // all nodes in the cluster share the same key without extra config. + // SHA-256(privKey || "mpc-internal-api") truncated to hex. + h := sha256.Sum256(append(privKey.Seed(), []byte("mpc-internal-api")...)) + internalAPIKey = hex.EncodeToString(h[:]) + logger.Warn("MPC_INTERNAL_API_KEY not set; derived internal API key from node identity (set MPC_INTERNAL_API_KEY in production)") + } + + // Rate limiter: 10 requests/min for mutating endpoints (keygen, backup). + internalRL := mpcapi.NewRateLimiter(10) + + // internalAuth is middleware that gates all mutating internal endpoints + // behind a bearer token. /healthz is exempt (K8s probes need it). + internalAuth := func(next http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + auth := r.Header.Get("Authorization") + if auth == "" || auth != "Bearer "+internalAPIKey { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusUnauthorized) + json.NewEncoder(w).Encode(map[string]string{"error": "unauthorized"}) + return + } + next.ServeHTTP(w, r) + } + } + + // internalRateLimit wraps a handler with the tight rate limiter. + internalRateLimit := func(next http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + ip := r.RemoteAddr + if host, _, err := net.SplitHostPort(ip); err == nil { + ip = host + } + if !internalRL.Allow(ip) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusTooManyRequests) + json.NewEncoder(w).Encode(map[string]string{"error": "rate limit exceeded"}) + return + } + next.ServeHTTP(w, r) + } + } + + mux := http.NewServeMux() + // Health probe handler — unauthenticated (K8s liveness/readiness probes). + // Served on /healthz only (platform standard); legacy /health is not registered.
+ healthHandler := func(w http.ResponseWriter, r *http.Request) { + ready := peerRegistry.ArePeersReady() + connected := factory.Transport().GetPeers() + status := "healthy" + httpCode := http.StatusOK + if !ready { + status = "degraded" + httpCode = http.StatusServiceUnavailable + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(httpCode) + resp := map[string]interface{}{ + "status": status, + "node_id": nodeID, + "mode": "consensus", + "expected_peers": len(peerIDs), + "connected_peers": len(connected), + "ready": ready, + "threshold": threshold, + "version": Version, + } + json.NewEncoder(w).Encode(resp) + } + mux.HandleFunc("/healthz", healthHandler) + mux.HandleFunc("/keys", internalAuth(func(w http.ResponseWriter, r *http.Request) { + keys, err := factory.KeyInfoStore().ListKeys() + w.Header().Set("Content-Type", "application/json") + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + json.NewEncoder(w).Encode(map[string]string{"error": err.Error()}) + return + } + json.NewEncoder(w).Encode(keys) + })) + mux.HandleFunc("/backup", internalAuth(internalRateLimit(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + w.Header().Set("Content-Type", "application/json") + logger.Info("Audit: backup triggered", "nodeID", nodeID, "remote", r.RemoteAddr) + if zapKV, ok := factory.KVStore().(*kvstore.Store); ok && zapKV.Exec != nil { + s3Cfg := backup.S3ConfigFromEnv(nodeID) + mgr, err := backup.NewManager(zapKV.Exec, filepath.Join(dataDir, "backups"), nodeID, 0, s3Cfg) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + json.NewEncoder(w).Encode(map[string]string{"error": err.Error()}) + return + } + if err := mgr.RunBackup(); err != nil { + w.WriteHeader(http.StatusInternalServerError) + json.NewEncoder(w).Encode(map[string]string{"error": err.Error()}) + return + } + json.NewEncoder(w).Encode(map[string]string{"status": "backup 
completed"}) + } else { + w.WriteHeader(http.StatusServiceUnavailable) + json.NewEncoder(w).Encode(map[string]string{"error": "backup not available"}) + } + }))) + mux.HandleFunc("/keygen", internalAuth(internalRateLimit(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + w.Header().Set("Content-Type", "application/json") + + if !peerRegistry.ArePeersReady() { + w.WriteHeader(http.StatusServiceUnavailable) + json.NewEncoder(w).Encode(map[string]string{"error": "peers not ready"}) + return + } + + // Parse request body โ€” orgID is required for tenant isolation. + var req struct { + OrgID string `json:"org_id"` + WalletID string `json:"wallet_id"` + } + if r.Body != nil { + json.NewDecoder(r.Body).Decode(&req) + } + if req.OrgID == "" { + w.WriteHeader(http.StatusBadRequest) + json.NewEncoder(w).Encode(map[string]string{"error": "org_id is required"}) + return + } + if req.WalletID == "" { + // Generate a deterministic wallet ID from timestamp + node + h := sha256.Sum256([]byte(fmt.Sprintf("%s-%d", nodeID, time.Now().UnixNano()))) + req.WalletID = hex.EncodeToString(h[:16]) + } + + walletID := req.WalletID + + // Subscribe to the result topic before triggering keygen + resultTopic := fmt.Sprintf("mpc.mpc_keygen_result.%s", walletID) + resultCh := make(chan *event.KeygenResultEvent, 1) + unsub, err := pubSub.Subscribe(resultTopic, func(natMsg *nats.Msg) { + var result event.KeygenResultEvent + if err := json.Unmarshal(natMsg.Data, &result); err == nil { + select { + case resultCh <- &result: + default: + } + } + }) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + json.NewEncoder(w).Encode(map[string]string{"error": "failed to subscribe to result topic"}) + return + } + defer unsub.Unsubscribe() + + // Create and publish GenerateKeyMessage, signed by this node + sig := consensusIdentity.SignMessage([]byte(walletID)) + msg := types.GenerateKeyMessage{ + OrgID: 
req.OrgID, + WalletID: walletID, + Signature: sig, + } + msgData, _ := json.Marshal(msg) + + if err := pubSub.Publish("mpc:generate", msgData); err != nil { + w.WriteHeader(http.StatusInternalServerError) + json.NewEncoder(w).Encode(map[string]string{"error": fmt.Sprintf("failed to publish keygen: %v", err)}) + return + } + + logger.Info("Audit: keygen triggered", "nodeID", nodeID, "orgID", req.OrgID, "walletID", walletID, "remote", r.RemoteAddr) + + // Wait for result with 60s timeout + select { + case result := <-resultCh: + resp := map[string]interface{}{ + "wallet_id": result.WalletID, + "result_type": result.ResultType, + } + if result.ResultType == event.ResultTypeSuccess { + resp["ecdsa_pub_key"] = hex.EncodeToString(result.ECDSAPubKey) + resp["eddsa_pub_key"] = hex.EncodeToString(result.EDDSAPubKey) + if len(result.ECDSAPubKey) >= 32 { + resp["eth_address"] = pubKeyToEthAddress(result.ECDSAPubKey) + resp["btc_address"] = pubKeyToBtcAddress(result.ECDSAPubKey) + } + if len(result.EDDSAPubKey) == 32 { + resp["sol_address"] = eddsaPubKeyToSolAddress(result.EDDSAPubKey) + } + } else { + resp["error"] = result.ErrorReason + resp["error_code"] = result.ErrorCode + } + json.NewEncoder(w).Encode(resp) + case <-time.After(60 * time.Second): + w.WriteHeader(http.StatusGatewayTimeout) + json.NewEncoder(w).Encode(map[string]string{ + "error": "keygen timed out after 60s", + "wallet_id": walletID, + }) + } + }))) + + srv := &http.Server{ + Addr: apiAddr, + Handler: http.MaxBytesHandler(mux, 1<<20), // 1 MB body limit + ReadTimeout: 30 * time.Second, + ReadHeaderTimeout: 10 * time.Second, + WriteTimeout: 90 * time.Second, // keygen can take 60s + IdleTimeout: 120 * time.Second, + } + go func() { + logger.Info("HTTP API server starting", "addr", apiAddr) + if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed { + logger.Error("HTTP API server failed", err) + } + }() + defer srv.Close() + } + + // Start periodic backup with optional S3 upload + 
backupDir := filepath.Join(dataDir, "backups") + if zapKV, ok := factory.KVStore().(*kvstore.Store); ok && zapKV.Exec != nil { + s3Cfg := backup.S3ConfigFromEnv(nodeID) + backupMgr, err := backup.NewManager(zapKV.Exec, backupDir, nodeID, 5*time.Minute, s3Cfg) + if err != nil { + logger.Warn("Failed to create backup manager", "err", err) + } else { + backupMgr.Start() + defer backupMgr.Stop() + logger.Info("Backup manager started", "period", "5m", "s3", s3Cfg != nil) + } + } + + // Start Dashboard API server (ZapDB-backed, no external dependencies) + apiListenAddr := c.String("api-listen") + jwtSecret := c.String("jwt-secret") + if jwtSecret == "" { + jwtSecret = os.Getenv("MPC_JWT_SECRET") + } + if jwtSecret != "" { + dbPath := filepath.Join(dataDir, "dashboard.db") + database, err := db.New(dbPath, "") + if err != nil { + logger.Error("Failed to open dashboard database", err) + } else { + defer database.Close() + + mpcBackend := &ConsensusMPCBackend{ + pubSub: pubSub, + peerRegistry: peerRegistry, + factory: factory, + keyInfoStore: factory.KeyInfoStore(), + identity: consensusIdentity, + nodeID: nodeID, + threshold: threshold, + } + + apiServer := mpcapi.NewServer(database, mpcBackend, jwtSecret) + apiServer.StartScheduler(ctx) + + // Wire HSM signer for intent co-signing + signerType := c.String("hsm-signer") + if signerType != "" { + signer, signerErr := hsm.NewSigner(signerType, nil) + if signerErr != nil { + logger.Error("Failed to create HSM signer", signerErr, "provider", signerType) + } else { + apiServer.SetHSM(signer) + logger.Info("HSM signer configured for co-signing", "provider", signer.Provider()) + } + } + + // HSM threshold attestation (key share vault storage) + if c.Bool("hsm-attest") { + logger.Info("HSM threshold attestation enabled", + "signer", c.String("hsm-signer"), + "attest_key", c.String("hsm-signer-key-id"), + "vault_provider", c.String("hsm-provider"), + ) + } + + // Mount chi handler on Base + os.Args = []string{"mpcd", "serve", 
"--http", apiListenAddr} + baseApp := base.New() + baseApp.OnServe().BindFunc(func(e *core.ServeEvent) error { + // Embedded admin UI at /_/mpc/ + e.Router.GET("/_/mpc/{path...}", apis.Static(uimpc.DistDirFS(), true)) + + e.Router.Any("/{path...}", func(re *core.RequestEvent) error { + apiServer.Handler().ServeHTTP(re.Response, re.Request) + return nil + }) + return e.Next() + }) + + logger.Info("Dashboard API starting (Base+SQLite)", "addr", apiListenAddr) + go func() { + if err := baseApp.Start(); err != nil { + logger.Error("Dashboard API server failed", err) + } + }() + + logger.Info("Dashboard API ready", "addr", apiListenAddr, "db", dbPath) + } + } + + // Wait for shutdown signal + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM) + <-sigChan + + logger.Warn("Shutdown signal received, stopping...") + return nil +} + +// ConsensusMPCBackend implements api.MPCBackend using the consensus transport. +type ConsensusMPCBackend struct { + pubSub *ConsensusPubSubAdapter + peerRegistry *ConsensusPeerRegistry + factory *transport.Factory + keyInfoStore *transport.KeyInfoStore + identity *ConsensusIdentityStore + nodeID string + threshold int +} + +func (b *ConsensusMPCBackend) TriggerKeygen(orgID, walletID string) (*mpcapi.KeygenResult, error) { + if walletID == "" { + h := sha256.Sum256([]byte(fmt.Sprintf("%s-%d", b.nodeID, time.Now().UnixNano()))) + walletID = hex.EncodeToString(h[:16]) + } + + resultTopic := fmt.Sprintf("mpc.mpc_keygen_result.%s", walletID) + resultCh := make(chan *event.KeygenResultEvent, 1) + unsub, err := b.pubSub.Subscribe(resultTopic, func(natMsg *nats.Msg) { + var result event.KeygenResultEvent + if err := json.Unmarshal(natMsg.Data, &result); err == nil { + select { + case resultCh <- &result: + default: + } + } + }) + if err != nil { + return nil, fmt.Errorf("failed to subscribe to result topic: %w", err) + } + defer unsub.Unsubscribe() + + sig := b.identity.SignMessage([]byte(walletID)) + msg := 
types.GenerateKeyMessage{OrgID: orgID, WalletID: walletID, Signature: sig} + msgData, _ := json.Marshal(msg) + if err := b.pubSub.Publish("mpc:generate", msgData); err != nil { + return nil, fmt.Errorf("failed to publish keygen: %w", err) + } + + select { + case result := <-resultCh: + if result.ResultType != event.ResultTypeSuccess { + return nil, fmt.Errorf("keygen failed: %s", result.ErrorReason) + } + ethAddr := "" + btcAddr := "" + solAddr := "" + if len(result.ECDSAPubKey) >= 32 { + ethAddr = pubKeyToEthAddress(result.ECDSAPubKey) + btcAddr = pubKeyToBtcAddress(result.ECDSAPubKey) + } + if len(result.EDDSAPubKey) == 32 { + solAddr = eddsaPubKeyToSolAddress(result.EDDSAPubKey) + } + return &mpcapi.KeygenResult{ + WalletID: result.WalletID, + ECDSAPubKey: hex.EncodeToString(result.ECDSAPubKey), + EDDSAPubKey: hex.EncodeToString(result.EDDSAPubKey), + EthAddress: ethAddr, + BtcAddress: btcAddr, + SolAddress: solAddr, + }, nil + case <-time.After(120 * time.Second): + return nil, fmt.Errorf("keygen timed out after 120s") + } +} + +func (b *ConsensusMPCBackend) TriggerSign(orgID, walletID string, payload []byte) (*mpcapi.SignResult, error) { + txID := fmt.Sprintf("sign-%d", time.Now().UnixNano()) + resultTopic := fmt.Sprintf("mpc.mpc_signing_result.%s", walletID) + resultCh := make(chan json.RawMessage, 1) + unsub, err := b.pubSub.Subscribe(resultTopic, func(natMsg *nats.Msg) { + select { + case resultCh <- natMsg.Data: + default: + } + }) + if err != nil { + return nil, fmt.Errorf("failed to subscribe to signing result: %w", err) + } + defer unsub.Unsubscribe() + + // Look up key type from key info store + keyType := types.KeyTypeSecp256k1 // default for ECDSA + if b.keyInfoStore != nil { + if info, err := b.keyInfoStore.Get(walletID); err == nil && info.KeyType != "" { + keyType = types.KeyType(info.KeyType) + } + } + // Normalize legacy key type names + switch keyType { + case "ecdsa", "ECDSA": + keyType = types.KeyTypeSecp256k1 + case "eddsa", "EDDSA": + 
keyType = types.KeyTypeEd25519 + } + + msg := types.SignTxMessage{ + OrgID: orgID, + KeyType: keyType, + WalletID: walletID, + TxID: txID, + Tx: payload, + } + // Sign the message with the node's private key + raw, _ := msg.Raw() + msg.Signature = b.identity.SignMessage(raw) + msgData, _ := json.Marshal(msg) + if err := b.pubSub.Publish("mpc:sign", msgData); err != nil { + return nil, fmt.Errorf("failed to publish sign request: %w", err) + } + + select { + case data := <-resultCh: + var result struct { + ResultType string `json:"result_type"` + ErrorReason string `json:"error_reason"` + R []byte `json:"r"` + S []byte `json:"s"` + SignatureRecovery []byte `json:"signature_recovery"` + Signature []byte `json:"signature"` + } + if err := json.Unmarshal(data, &result); err != nil { + return nil, fmt.Errorf("failed to unmarshal signing result: %w", err) + } + if result.ResultType == "error" { + return nil, fmt.Errorf("MPC signing failed: %s", result.ErrorReason) + } + sigR := hex.EncodeToString(result.R) + sigS := hex.EncodeToString(result.S) + var sigHex string + if len(result.Signature) > 0 { + sigHex = hex.EncodeToString(result.Signature) + } + return &mpcapi.SignResult{R: sigR, S: sigS, Signature: sigHex}, nil + case <-time.After(60 * time.Second): + return nil, fmt.Errorf("signing timed out after 60s") + } +} + +func (b *ConsensusMPCBackend) TriggerReshare(orgID, walletID string, newThreshold int, newParticipants []string) error { + msg := map[string]interface{}{ + "org_id": orgID, + "wallet_id": walletID, + "new_threshold": newThreshold, + "new_participants": newParticipants, + } + msgData, _ := json.Marshal(msg) + return b.pubSub.Publish("mpc:reshare", msgData) +} + +func (b *ConsensusMPCBackend) ExportKeyShare(orgID, walletID string) ([]byte, error) { + key := mpc.OrgScopedKey(orgID, walletID) + return b.factory.KVStore().Get(key) +} + +func (b *ConsensusMPCBackend) GetClusterStatus() *mpcapi.ClusterStatus { + ready := b.peerRegistry.ArePeersReady() + connected 
:= b.factory.Transport().GetPeers() + return &mpcapi.ClusterStatus{ + NodeID: b.nodeID, + Mode: "consensus", + ExpectedPeers: len(b.peerRegistry.peerIDs), + ConnectedPeers: len(connected), + Ready: ready, + Threshold: b.threshold, + Version: Version, + } +} + +// loadOrGenerateIdentity loads or generates Ed25519 identity +func loadOrGenerateIdentity(keysDir, nodeID string) (ed25519.PrivateKey, ed25519.PublicKey, error) { + identityPath := filepath.Join(keysDir, nodeID+"_identity.json") + + // Try to load existing identity + data, err := os.ReadFile(identityPath) + if err == nil { + var identityData struct { + NodeID string `json:"node_id"` + PublicKey string `json:"public_key"` + PrivateKey string `json:"private_key"` + } + if err := json.Unmarshal(data, &identityData); err == nil { + privKeyBytes, err := hex.DecodeString(identityData.PrivateKey) + if err == nil && len(privKeyBytes) == ed25519.PrivateKeySize { + privKey := ed25519.PrivateKey(privKeyBytes) + pubKey := privKey.Public().(ed25519.PublicKey) + logger.Info("Loaded existing identity", "nodeID", nodeID) + return privKey, pubKey, nil + } + } + } + + // Generate new identity + pubKey, privKey, err := ed25519.GenerateKey(nil) + if err != nil { + return nil, nil, err + } + + // Save identity + identityData := map[string]string{ + "node_id": nodeID, + "public_key": hex.EncodeToString(pubKey), + "private_key": hex.EncodeToString(privKey), + } + data, err = json.MarshalIndent(identityData, "", " ") + if err != nil { + return nil, nil, err + } + if err := os.WriteFile(identityPath, data, 0600); err != nil { + return nil, nil, err + } + + logger.Info("Generated new identity", "nodeID", nodeID) + return privKey, pubKey, nil +} + +// ConsensusIdentityStore implements identity.Store for consensus mode +type ConsensusIdentityStore struct { + nodeID string + privateKey ed25519.PrivateKey + publicKey ed25519.PublicKey + initiatorPubKey ed25519.PublicKey + publicKeys map[string][]byte + mu sync.RWMutex +} + +func 
NewConsensusIdentityStore(nodeID string, privKey ed25519.PrivateKey, pubKey ed25519.PublicKey) *ConsensusIdentityStore { + s := &ConsensusIdentityStore{ + nodeID: nodeID, + privateKey: privKey, + publicKey: pubKey, + publicKeys: make(map[string][]byte), + } + s.publicKeys[nodeID] = pubKey + + // Load the event initiator public key from viper config. + // This Ed25519 public key is used to verify that inbound event + // messages (keygen, signing, reshare) originated from the authorized + // initiator and have not been tampered with. + if initiatorHex := viper.GetString("event_initiator_pubkey"); initiatorHex != "" { + if decoded, err := hex.DecodeString(initiatorHex); err == nil && len(decoded) == ed25519.PublicKeySize { + s.initiatorPubKey = ed25519.PublicKey(decoded) + } + } + + // In consensus mode, if no explicit initiator key is configured, + // skip initiator verification since all messages come from within + // the trusted cluster. The internal API (port 9800) is not + // exposed externally. + if s.initiatorPubKey == nil { + logger.Info("No explicit initiator key configured; initiator verification will be skipped (consensus mode)") + } + + return s +} + +// SignMessage signs a message payload with the node's private key. +// Used by HTTP endpoints to sign event messages before publishing. +func (s *ConsensusIdentityStore) SignMessage(payload []byte) []byte { + return ed25519.Sign(s.privateKey, payload) +} + +func (s *ConsensusIdentityStore) GetPublicKey(nodeID string) ([]byte, error) { + s.mu.RLock() + defer s.mu.RUnlock() + if key, ok := s.publicKeys[nodeID]; ok { + return key, nil + } + return nil, fmt.Errorf("public key not found for node: %s", nodeID) +} + +func (s *ConsensusIdentityStore) VerifyInitiatorMessage(msg types.InitiatorMessage) error { + // In consensus mode without an explicit initiator key, skip + // verification. The internal API is only accessible within the + // cluster, so all messages are trusted. 
+ if s.initiatorPubKey == nil { + return nil + } + + // Reconstruct the canonical payload that was signed (excludes the + // signature field itself). + raw, err := msg.Raw() + if err != nil { + return fmt.Errorf("failed to get raw message data: %w", err) + } + + sig := msg.Sig() + if len(sig) == 0 { + return fmt.Errorf("message has no signature") + } + + if !ed25519.Verify(s.initiatorPubKey, raw, sig) { + return fmt.Errorf("invalid Ed25519 signature from initiator") + } + + return nil +} + +func (s *ConsensusIdentityStore) AddPeerPublicKey(nodeID string, pubKey []byte) { + s.mu.Lock() + defer s.mu.Unlock() + s.publicKeys[nodeID] = pubKey +} + +// SignWireMessage signs a protocol wire message with this node's Ed25519 key. +func (s *ConsensusIdentityStore) SignWireMessage(msg *types.Message) { + msg.Sign(s.privateKey) +} + +// VerifyWireMessage verifies a protocol wire message's Ed25519 signature +// using the sender's public key looked up by SenderNodeID. +func (s *ConsensusIdentityStore) VerifyWireMessage(msg *types.Message) error { + if len(msg.Signature) == 0 { + return fmt.Errorf("message has no signature") + } + nodeID := msg.SenderNodeID + if nodeID == "" { + nodeID = msg.SenderID + } + pubKey, err := s.GetPublicKey(nodeID) + if err != nil { + return fmt.Errorf("failed to get sender's public key for node %s: %w", nodeID, err) + } + return msg.Verify(ed25519.PublicKey(pubKey)) +} + +// ConsensusPubSubAdapter adapts transport.PubSub to messaging.PubSub +type ConsensusPubSubAdapter struct { + pubsub *transport.PubSub +} + +func NewConsensusPubSubAdapter(pubsub *transport.PubSub) *ConsensusPubSubAdapter { + return &ConsensusPubSubAdapter{pubsub: pubsub} +} + +func (a *ConsensusPubSubAdapter) Publish(topic string, data []byte) error { + return a.pubsub.Publish(topic, data) +} + +func (a *ConsensusPubSubAdapter) PublishWithReply(topic, reply string, data []byte, headers map[string]string) error { + return a.pubsub.PublishWithReply(topic, reply, data, headers) +} + 
+func (a *ConsensusPubSubAdapter) Subscribe(topic string, handler func(msg *nats.Msg)) (messaging.Subscription, error) { + sub, err := a.pubsub.Subscribe(topic, func(msg *transport.NATSMsg) { + // Convert transport.NATSMsg to nats.Msg + natsMsg := &nats.Msg{ + Subject: msg.Subject, + Reply: msg.Reply, + Data: msg.Data, + Header: nats.Header(msg.Header), + } + handler(natsMsg) + }) + if err != nil { + return nil, err + } + return &consensusSubscription{sub: sub}, nil +} + +type consensusSubscription struct { + sub *transport.Subscription +} + +func (s *consensusSubscription) Unsubscribe() error { + return s.sub.Unsubscribe() +} + +// ConsensusPeerRegistry adapts transport.Registry to mpc.PeerRegistry +type ConsensusPeerRegistry struct { + registry *transport.Registry + nodeID string + peerIDs []string +} + +func NewConsensusPeerRegistry(registry *transport.Registry, nodeID string, peerIDs []string) *ConsensusPeerRegistry { + return &ConsensusPeerRegistry{ + registry: registry, + nodeID: nodeID, + peerIDs: peerIDs, + } +} + +func (r *ConsensusPeerRegistry) Ready() error { + return r.registry.Ready() +} + +func (r *ConsensusPeerRegistry) Resign() error { + return r.registry.Resign() +} + +func (r *ConsensusPeerRegistry) WatchPeersReady() { + r.registry.WatchPeersReady() +} + +func (r *ConsensusPeerRegistry) ArePeersReady() bool { + return r.registry.ArePeersReady() +} + +func (r *ConsensusPeerRegistry) GetReadyPeersCount() int64 { + return r.registry.GetReadyPeersCount() +} + +func (r *ConsensusPeerRegistry) GetTotalPeersCount() int64 { + return int64(len(r.peerIDs) + 1) // peers + self +} + +func (r *ConsensusPeerRegistry) GetReadyPeersIncludeSelf() []string { + return r.registry.GetReadyPeersIncludeSelf() +} + +// ConsensusKeyInfoStore adapts transport.KeyInfoStore to keyinfo.Store +type ConsensusKeyInfoStore struct { + store *transport.KeyInfoStore + peerRegistry *ConsensusPeerRegistry +} + +func NewConsensusKeyInfoStore(store *transport.KeyInfoStore, peerRegistry 
*ConsensusPeerRegistry) *ConsensusKeyInfoStore { + return &ConsensusKeyInfoStore{store: store, peerRegistry: peerRegistry} +} + +func (s *ConsensusKeyInfoStore) Get(walletID string) (*keyinfo.KeyInfo, error) { + info, err := s.store.Get(walletID) + if err != nil { + return nil, err + } + // Convert transport.KeyInfo to keyinfo.KeyInfo + // Populate ParticipantPeerIDs from peer registry (all ready peers including self) + participantPeerIDs := s.peerRegistry.GetReadyPeersIncludeSelf() + return &keyinfo.KeyInfo{ + ParticipantPeerIDs: participantPeerIDs, + Threshold: info.Threshold, + Version: 1, // Default version + }, nil +} + +func (s *ConsensusKeyInfoStore) Save(walletID string, info *keyinfo.KeyInfo) error { + return s.store.RegisterKey(walletID, "secp256k1", info.Threshold, "", "", nil) +} + +// ConsensusMessageQueue adapts transport for messaging.MessageQueue +type ConsensusMessageQueue struct { + transport *transport.Transport + nodeID string + queueType string + handlers map[string]func([]byte) error + mu sync.RWMutex +} + +func NewConsensusMessageQueue(t *transport.Transport, nodeID, queueType string) *ConsensusMessageQueue { + return &ConsensusMessageQueue{ + transport: t, + nodeID: nodeID, + queueType: queueType, + handlers: make(map[string]func([]byte) error), + } +} + +func (q *ConsensusMessageQueue) Enqueue(topic string, message []byte, options *messaging.EnqueueOptions) error { + // Broadcast the message via transport's Publish method + return q.transport.Publish(topic, message) +} + +func (q *ConsensusMessageQueue) Dequeue(topic string, handler func(message []byte) error) error { + q.mu.Lock() + q.handlers[topic] = handler + q.mu.Unlock() + // In consensus mode, messages are delivered via PubSub subscriptions + // The handler will be called when messages arrive + return nil +} + +func (q *ConsensusMessageQueue) Close() { + // Nothing to close in consensus mode +} + +// pubKeyToEthAddress derives an Ethereum address from an ECDSA public key. 
+// Accepts compressed (33 bytes), uncompressed (65 bytes), or raw x-coordinate (32 bytes). +func pubKeyToEthAddress(pubKey []byte) string { + var xyBytes []byte // 64 bytes: X(32) || Y(32) + switch len(pubKey) { + case 65: + // Uncompressed: 0x04 || X(32) || Y(32) + xyBytes = pubKey[1:] + case 33: + // Compressed: 0x02/0x03 || X(32) โ€” decompress via secp256k1 + x, y := ellipticUnmarshalCompressed(pubKey) + if x == nil { + return "" + } + xyBytes = append(x.Bytes(), y.Bytes()...) + case 32: + // Raw x-coordinate only โ€” try decompressing with 0x02 prefix (even y) + compressed := append([]byte{0x02}, pubKey...) + x, y := ellipticUnmarshalCompressed(compressed) + if x == nil { + // Try odd y + compressed[0] = 0x03 + x, y = ellipticUnmarshalCompressed(compressed) + } + if x == nil { + return "" + } + xBytes := make([]byte, 32) + yBytes := make([]byte, 32) + xB := x.Bytes() + yB := y.Bytes() + copy(xBytes[32-len(xB):], xB) + copy(yBytes[32-len(yB):], yB) + xyBytes = append(xBytes, yBytes...) + default: + // Try as hex string + decoded, err := hex.DecodeString(string(pubKey)) + if err == nil && len(decoded) > 0 { + return pubKeyToEthAddress(decoded) + } + return "" + } + if len(xyBytes) != 64 { + return "" + } + hash := sha3.NewLegacyKeccak256() + hash.Write(xyBytes) + addrBytes := hash.Sum(nil)[12:] + return "0x" + hex.EncodeToString(addrBytes) +} + +// ellipticUnmarshalCompressed decompresses a secp256k1 compressed public key. 
+func ellipticUnmarshalCompressed(compressed []byte) (*big.Int, *big.Int) { + if len(compressed) != 33 || (compressed[0] != 0x02 && compressed[0] != 0x03) { + return nil, nil + } + curve := crypto_elliptic.P256() // Use P256 as base; secp256k1 params below + // secp256k1 curve parameters + p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16) + x := new(big.Int).SetBytes(compressed[1:33]) + // yยฒ = xยณ + 7 (mod p) for secp256k1 + x3 := new(big.Int).Mul(x, x) + x3.Mul(x3, x) + x3.Mod(x3, p) + y2 := new(big.Int).Add(x3, big.NewInt(7)) + y2.Mod(y2, p) + // ModSqrt + y := new(big.Int).ModSqrt(y2, p) + if y == nil { + return nil, nil + } + // Check parity + if y.Bit(0) != uint(compressed[0]&1) { + y.Sub(p, y) + } + _ = curve // suppress unused + return x, y +} + +// pubKeyToBtcAddress derives a Bitcoin P2PKH address from a secp256k1 public key. +// Accepts compressed (33 bytes), uncompressed (65 bytes), or raw x-coordinate (32 bytes). +func pubKeyToBtcAddress(pubKey []byte) string { + var compressed []byte + switch len(pubKey) { + case 33: + compressed = pubKey + case 65: + // Compress: take X coordinate, prefix with 0x02 or 0x03 based on Y parity + prefix := byte(0x02) + if pubKey[64]&1 == 1 { + prefix = 0x03 + } + compressed = append([]byte{prefix}, pubKey[1:33]...) + case 32: + // Raw x-coordinate โ€” use even y (0x02) + compressed = append([]byte{0x02}, pubKey...) + default: + return "" + } + // SHA256(compressed pubkey) + sha := sha256.Sum256(compressed) + // RIPEMD160(SHA256) + rip := ripemd160.New() + rip.Write(sha[:]) + pubKeyHash := rip.Sum(nil) // 20 bytes + + // Base58Check encode with version byte 0x00 (mainnet P2PKH) + return base58CheckEncode(0x00, pubKeyHash) +} + +// base58CheckEncode encodes data with a version byte using Base58Check encoding. +func base58CheckEncode(version byte, payload []byte) string { + versioned := append([]byte{version}, payload...) 
+ // Double SHA256 checksum + first := sha256.Sum256(versioned) + second := sha256.Sum256(first[:]) + checksum := second[:4] + full := append(versioned, checksum...) + return base58.Encode(full) +} + +// eddsaPubKeyToSolAddress derives a Solana address from an Ed25519 public key. +// The address is simply the base58 encoding of the 32-byte public key. +func eddsaPubKeyToSolAddress(pubKey []byte) string { + if len(pubKey) != 32 { + return "" + } + return base58.Encode(pubKey) +} + diff --git a/cmd/mpcium/main.go b/cmd/mpcium/main.go deleted file mode 100644 index cff7b70..0000000 --- a/cmd/mpcium/main.go +++ /dev/null @@ -1,355 +0,0 @@ -package main - -import ( - "context" - "fmt" - "os" - "os/signal" - "path/filepath" - "syscall" - - "github.com/fystack/mpcium/pkg/config" - "github.com/fystack/mpcium/pkg/constant" - "github.com/fystack/mpcium/pkg/event" - "github.com/fystack/mpcium/pkg/eventconsumer" - "github.com/fystack/mpcium/pkg/identity" - "github.com/fystack/mpcium/pkg/infra" - "github.com/fystack/mpcium/pkg/keyinfo" - "github.com/fystack/mpcium/pkg/kvstore" - "github.com/fystack/mpcium/pkg/logger" - "github.com/fystack/mpcium/pkg/messaging" - "github.com/fystack/mpcium/pkg/mpc" - "github.com/hashicorp/consul/api" - "github.com/nats-io/nats.go" - "github.com/spf13/viper" - "github.com/urfave/cli/v3" - "golang.org/x/term" -) - -const ( - // Version information - VERSION = "0.2.1" -) - -func main() { - app := &cli.Command{ - Name: "mpcium", - Usage: "Multi-Party Computation node for threshold signatures", - Version: VERSION, - Commands: []*cli.Command{ - { - Name: "start", - Usage: "Start an MPCIUM node", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "name", - Aliases: []string{"n"}, - Usage: "Node name", - Required: true, - }, - &cli.BoolFlag{ - Name: "decrypt-private-key", - Aliases: []string{"d"}, - Value: false, - Usage: "Decrypt node private key", - }, - &cli.BoolFlag{ - Name: "prompt-credentials", - Aliases: []string{"p"}, - Usage: "Prompt for sensitive 
parameters", - }, - &cli.BoolFlag{ - Name: "debug", - Usage: "Enable debug logging", - Value: false, - }, - }, - Action: runNode, - }, - { - Name: "version", - Usage: "Display detailed version information", - Action: func(ctx context.Context, c *cli.Command) error { - fmt.Printf("mpcium version %s\n", VERSION) - return nil - }, - }, - }, - } - - if err := app.Run(context.Background(), os.Args); err != nil { - fmt.Println(err) - os.Exit(1) - } -} - -func runNode(ctx context.Context, c *cli.Command) error { - nodeName := c.String("name") - decryptPrivateKey := c.Bool("decrypt-private-key") - usePrompts := c.Bool("prompt-credentials") - debug := c.Bool("debug") - - config.InitViperConfig() - environment := viper.GetString("environment") - logger.Init(environment, debug) - - // Handle configuration based on prompt flag - if usePrompts { - promptForSensitiveCredentials() - } else { - // Validate the config values - checkRequiredConfigValues() - } - - consulClient := infra.GetConsulClient(environment) - badgerKV := NewBadgerKV(nodeName) - defer badgerKV.Close() - - keyinfoStore := keyinfo.NewStore(consulClient.KV()) - peers := LoadPeersFromConsul(consulClient) - nodeID := GetIDFromName(nodeName, peers) - - identityStore, err := identity.NewFileStore("identity", nodeName, decryptPrivateKey) - if err != nil { - logger.Fatal("Failed to create identity store", err) - } - - natsConn, err := GetNATSConnection(environment) - if err != nil { - logger.Fatal("Failed to connect to NATS", err) - } - defer natsConn.Close() - - pubsub := messaging.NewNATSPubSub(natsConn) - signingStream, err := messaging.NewJetStreamPubSub(natsConn, event.SigningPublisherStream, []string{ - event.SigningRequestTopic, - }) - if err != nil { - logger.Fatal("Failed to create JetStream PubSub", err) - } - - directMessaging := messaging.NewNatsDirectMessaging(natsConn) - mqManager := messaging.NewNATsMessageQueueManager("mpc", []string{ - "mpc.mpc_keygen_success.*", - event.SigningResultTopic, - }, 
natsConn) - - genKeySuccessQueue := mqManager.NewMessageQueue("mpc_keygen_success") - defer genKeySuccessQueue.Close() - singingResultQueue := mqManager.NewMessageQueue("signing_result") - defer singingResultQueue.Close() - - logger.Info("Node is running", "peerID", nodeID, "name", nodeName) - - peerNodeIDs := GetPeerIDs(peers) - peerRegistry := mpc.NewRegistry(nodeID, peerNodeIDs, consulClient.KV()) - - mpcNode := mpc.NewNode( - nodeID, - peerNodeIDs, - pubsub, - directMessaging, - badgerKV, - keyinfoStore, - peerRegistry, - identityStore, - ) - defer mpcNode.Close() - - eventConsumer := eventconsumer.NewEventConsumer( - mpcNode, - pubsub, - genKeySuccessQueue, - singingResultQueue, - identityStore, - ) - eventConsumer.Run() - defer eventConsumer.Close() - - timeoutConsumer := eventconsumer.NewTimeOutConsumer( - natsConn, - singingResultQueue, - ) - - timeoutConsumer.Run() - defer timeoutConsumer.Close() - signingConsumer := eventconsumer.NewSigningConsumer(natsConn, signingStream, pubsub) - - // Make the node ready before starting the signing consumer - peerRegistry.Ready() - - appContext, cancel := context.WithCancel(context.Background()) - // Setup signal handling to cancel context on termination signals. 
- go func() { - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM) - <-sigChan - logger.Warn("Shutdown signal received, canceling context...") - cancel() - }() - - if err := signingConsumer.Run(appContext); err != nil { - logger.Error("error running consumer:", err) - } - - return nil -} - -// Prompt user for sensitive configuration values -func promptForSensitiveCredentials() { - fmt.Println("WARNING: Please back up your Badger DB password in a secure location.") - fmt.Println("If you lose this password, you will permanently lose access to your data!") - - // Prompt for badger password with confirmation - var badgerPass []byte - var confirmPass []byte - var err error - - for { - fmt.Print("Enter Badger DB password: ") - badgerPass, err = term.ReadPassword(int(syscall.Stdin)) - if err != nil { - logger.Fatal("Failed to read badger password", err) - } - fmt.Println() // Add newline after password input - - if len(badgerPass) == 0 { - fmt.Println("Password cannot be empty. Please try again.") - continue - } - - fmt.Print("Confirm Badger DB password: ") - confirmPass, err = term.ReadPassword(int(syscall.Stdin)) - if err != nil { - logger.Fatal("Failed to read confirmation password", err) - } - fmt.Println() // Add newline after password input - - if string(badgerPass) != string(confirmPass) { - fmt.Println("Passwords do not match. 
Please try again.") - continue - } - - break - } - - // Show masked password for confirmation - maskedPassword := maskString(string(badgerPass)) - fmt.Printf("Password set: %s\n", maskedPassword) - - viper.Set("badger_password", string(badgerPass)) - - // Prompt for initiator public key (using regular input since it's not as sensitive) - var initiatorKey string - fmt.Print("Enter event initiator public key (hex): ") - fmt.Scanln(&initiatorKey) - - if initiatorKey == "" { - logger.Fatal("Initiator public key cannot be empty", nil) - } - - // Show masked key for confirmation - maskedKey := maskString(initiatorKey) - fmt.Printf("Event initiator public key set: %s\n", maskedKey) - - viper.Set("event_initiator_pubkey", initiatorKey) - fmt.Println("\nโœ“ Configuration complete!") -} - -// maskString shows the first and last character of a string, replacing the middle with asterisks -func maskString(s string) string { - if len(s) <= 2 { - return s // Too short to mask - } - - masked := s[0:1] - for i := 0; i < len(s)-2; i++ { - masked += "*" - } - masked += s[len(s)-1:] - - return masked -} - -// Check required configuration values are present -func checkRequiredConfigValues() { - // Show warning if we're using file-based config but no password is set - if viper.GetString("badger_password") == "" { - logger.Fatal("Badger password is required", nil) - } - - if viper.GetString("event_initiator_pubkey") == "" { - logger.Fatal("Event initiator public key is required", nil) - } -} - -func NewConsulClient(addr string) *api.Client { - // Create a new Consul client - consulConfig := api.DefaultConfig() - consulConfig.Address = addr - consulClient, err := api.NewClient(consulConfig) - if err != nil { - logger.Fatal("Failed to create consul client", err) - } - logger.Info("Connected to consul!") - return consulClient -} - -func LoadPeersFromConsul(consulClient *api.Client) []config.Peer { // Create a Consul Key-Value store client - kv := consulClient.KV() - peers, err := 
config.LoadPeersFromConsul(kv, "mpc_peers/") - if err != nil { - logger.Fatal("Failed to load peers from Consul", err) - } - logger.Info("Loaded peers from consul", "peers", peers) - - return peers -} - -func GetPeerIDs(peers []config.Peer) []string { - var peersIDs []string - for _, peer := range peers { - peersIDs = append(peersIDs, peer.ID) - } - return peersIDs -} - -// Given node name, loop through peers and find the matching ID -func GetIDFromName(name string, peers []config.Peer) string { - // Get nodeID from node name - nodeID := config.GetNodeID(name, peers) - if nodeID == "" { - logger.Fatal("Empty Node ID", fmt.Errorf("node ID not found for name %s", name)) - } - - return nodeID -} - -func NewBadgerKV(nodeName string) *kvstore.BadgerKVStore { - // Badger KV DB - dbPath := filepath.Join(".", "db", nodeName) - badgerKv, err := kvstore.NewBadgerKVStore( - dbPath, - []byte(viper.GetString("badger_password")), - ) - if err != nil { - logger.Fatal("Failed to create badger kv store", err) - } - logger.Info("Connected to badger kv store", "path", dbPath) - return badgerKv -} - -func GetNATSConnection(environment string) (*nats.Conn, error) { - if environment != constant.EnvProduction { - return nats.Connect(viper.GetString("nats.url")) - } - clientCert := filepath.Join(".", "certs", "client-cert.pem") - clientKey := filepath.Join(".", "certs", "client-key.pem") - caCert := filepath.Join(".", "certs", "rootCA.pem") - - return nats.Connect(viper.GetString("nats.url"), - nats.ClientCert(clientCert, clientKey), - nats.RootCAs(caCert), - nats.UserInfo(viper.GetString("nats.username"), viper.GetString("nats.password")), - ) -} diff --git a/compose.yml b/compose.yml new file mode 100644 index 0000000..b16e1ef --- /dev/null +++ b/compose.yml @@ -0,0 +1,80 @@ +# Minimal local development stack. +# SQLite is the default storage โ€” no external DB required to start. +# For production, uncomment postgres and valkey below. 
+
+services:
+  api:
+    image: ghcr.io/luxfi/mpc-api:latest
+    command: >
+      start --mode consensus
+      --api-listen :8081
+      --data /data/mpc
+      --jwt-secret ${JWT_SECRET:?JWT_SECRET must be set}
+    ports:
+      - "8081:8081"
+    environment:
+      - JWT_SECRET=${JWT_SECRET:?JWT_SECRET must be set}
+    volumes:
+      - mpc-data:/data/mpc
+    healthcheck:
+      test: ["CMD", "wget", "-q", "--spider", "http://localhost:8081/healthz"]
+      interval: 10s
+      timeout: 5s
+      retries: 3
+
+  dashboard:
+    image: ghcr.io/luxfi/mpc-dashboard:latest
+    ports:
+      - "3000:3000"
+    environment:
+      - NEXT_PUBLIC_API_URL=http://localhost:8081/api/v1
+    depends_on:
+      api:
+        condition: service_healthy
+
+  ## Production: uncomment to use PostgreSQL instead of SQLite.
+  # postgres:
+  #   image: ghcr.io/hanzoai/sql:latest
+  #   environment:
+  #     POSTGRES_DB: mpc_api
+  #     POSTGRES_USER: mpc
+  #     POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
+  #   volumes:
+  #     - pgdata:/var/lib/postgresql/data
+  #   ports:
+  #     - "5432:5432"
+  #   healthcheck:
+  #     test: ["CMD-SHELL", "pg_isready -U mpc -d mpc_api"]
+  #     interval: 5s
+  #     timeout: 3s
+  #     retries: 5
+
+  ## Production: uncomment to enable Valkey KV cache.
+ # valkey: + # image: valkey/valkey:8-alpine + # ports: + # - "6379:6379" + # healthcheck: + # test: ["CMD", "valkey-cli", "ping"] + # interval: 5s + # timeout: 3s + # retries: 5 + + ## When using postgres+valkey, change api command to: + # api: + # command: > + # start --mode consensus + # --api-db postgres://mpc:${POSTGRES_PASSWORD}@postgres:5432/mpc_api?sslmode=disable + # --api-kv valkey:6379 + # --api-listen :8081 + # --data /data/mpc + # --jwt-secret ${JWT_SECRET:?JWT_SECRET must be set} + # depends_on: + # postgres: + # condition: service_healthy + # valkey: + # condition: service_healthy + +volumes: + mpc-data: + # pgdata: # uncomment for production postgres diff --git a/config.prod.yaml.template b/config.prod.yaml.template deleted file mode 100644 index 1c3f893..0000000 --- a/config.prod.yaml.template +++ /dev/null @@ -1,13 +0,0 @@ -nats: - url: tls://127.0.0.1:4222 # Please use TLS for production - username: "" - password: "" - -consul: - address: https://consul.example.com # Use HTTPS for production - username: username - token: "" - password: "" - -mpc_threshold: 2 -environment: production # Set to production for production environment diff --git a/config.yaml b/config.yaml new file mode 100644 index 0000000..7d14014 --- /dev/null +++ b/config.yaml @@ -0,0 +1,30 @@ +# MPC Node Configuration +# Env vars override everything: LUX_MPC_THRESHOLD, LUX_MPC_ENVIRONMENT, etc. +# Production: all secrets from KMS via --hsm-provider=env or --hsm-provider=aws + +mode: consensus +environment: local # mainnet | testnet | local + +# Consensus +mpc_threshold: 2 +max_concurrent_keygen: 2 + +# Storage (SQLite default โ€” no external DB needed) +# Set --api-db to a postgres:// URL to switch to PostgreSQL. +db_path: "." 
+ +# Backup +backup_enabled: true +backup_period_seconds: 300 +backup_dir: backups + +# Key material +event_initiator_pubkey: "c3f3511cd2f849fd61e81ea62a842fd33367ee8fb5b867bb900877c7fb72ce3b" + +## NATS (legacy mode only โ€” deprecated, used only with --mode=legacy) +# nats: +# url: nats://127.0.0.1:4222 + +## Consul (legacy mode only โ€” deprecated, used only with --mode=legacy) +# consul: +# address: localhost:8500 diff --git a/config.yaml.template b/config.yaml.template deleted file mode 100644 index aed57fb..0000000 --- a/config.yaml.template +++ /dev/null @@ -1,9 +0,0 @@ -nats: - url: nats://127.0.0.1:4222 -consul: - address: localhost:8500 - -mpc_threshold: 2 -environment: development -badger_password: "your_badger_password" -event_initiator_pubkey: "event_initiator_pubkey" diff --git a/contracts/ThresholdPolicy.sol b/contracts/ThresholdPolicy.sol new file mode 100644 index 0000000..3216220 --- /dev/null +++ b/contracts/ThresholdPolicy.sol @@ -0,0 +1,597 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.20; + +import "@openzeppelin/contracts/access/AccessControl.sol"; +import "@openzeppelin/contracts/utils/cryptography/ECDSA.sol"; +import "@openzeppelin/contracts/utils/cryptography/MessageHashUtils.sol"; + +/** + * @title ThresholdPolicy + * @notice On-chain policy definition for MPC threshold signing. + * @dev Policies defined here are enforced by the T-Chain (MPC network). 
+ * + * Architecture: + * X-Chain: Assets locked with policy hash + * T-Chain: MPC nodes verify policy before signing + * This contract: Defines unlock conditions + */ +contract ThresholdPolicy is AccessControl { + using ECDSA for bytes32; + using MessageHashUtils for bytes32; + + // ============================================================ + // ROLES + // ============================================================ + + bytes32 public constant POLICY_ADMIN_ROLE = keccak256("POLICY_ADMIN_ROLE"); + bytes32 public constant SIGNER_ROLE = keccak256("SIGNER_ROLE"); + + // ============================================================ + // TYPES + // ============================================================ + + // Rule opcodes (must match pkg/threshold/policy_vm.go) + uint8 public constant OP_CHECK_AMOUNT_LT = 0x01; + uint8 public constant OP_CHECK_AMOUNT_GT = 0x02; + uint8 public constant OP_CHECK_AMOUNT_RANGE = 0x03; + uint8 public constant OP_CHECK_CUMULATIVE = 0x04; + uint8 public constant OP_CHECK_WHITELIST = 0x10; + uint8 public constant OP_CHECK_BLACKLIST = 0x11; + uint8 public constant OP_CHECK_TIME_WINDOW = 0x20; + uint8 public constant OP_CHECK_TIME_LOCK = 0x21; + uint8 public constant OP_CHECK_COOLDOWN = 0x22; + uint8 public constant OP_REQUIRE_SIGNATURES = 0x30; + uint8 public constant OP_CHECK_VESTING = 0x40; + uint8 public constant OP_CHECK_STREAM_RATE = 0x41; + + // Result actions + uint8 public constant RESULT_ALLOW = 0x00; + uint8 public constant RESULT_DENY = 0x01; + uint8 public constant RESULT_REQUIRE_SIGS = 0x02; + uint8 public constant RESULT_DELAY = 0x03; + uint8 public constant RESULT_PARTIAL_UNLOCK = 0x04; + + struct Rule { + bytes8 ruleId; + uint8 opcode; + bytes[] operands; + uint8 resultAction; + bool enabled; + } + + struct Policy { + bytes32 walletId; + uint64 chainId; + uint64 version; + Rule[] rules; + address[] signers; + uint256 requiredSignatures; + uint256 createdAt; + uint256 expiresAt; + bool active; + } + + struct VestingSchedule 
{ + uint256 totalAmount; + uint256 startTime; + uint256 duration; + uint256 cliffDuration; + uint256 released; + } + + struct StreamConfig { + uint256 ratePerSecond; + uint256 startTime; + uint256 totalStreamed; + } + + // ============================================================ + // STATE + // ============================================================ + + // walletId => Policy + mapping(bytes32 => Policy) public policies; + + // walletId => vesting schedule + mapping(bytes32 => VestingSchedule) public vestingSchedules; + + // walletId => streaming config + mapping(bytes32 => StreamConfig) public streamConfigs; + + // walletId => destination => whitelisted + mapping(bytes32 => mapping(address => bool)) public whitelists; + + // walletId => destination => blacklisted + mapping(bytes32 => mapping(address => bool)) public blacklists; + + // walletId => daily cumulative amount + mapping(bytes32 => uint256) public dailyCumulative; + mapping(bytes32 => uint256) public dailyCumulativeReset; + + // walletId => last transaction timestamp + mapping(bytes32 => uint256) public lastTxTimestamp; + + // ============================================================ + // EVENTS + // ============================================================ + + event PolicyRegistered( + bytes32 indexed walletId, + bytes32 indexed policyHash, + uint64 version, + address indexed registrar + ); + + event PolicyUpdated( + bytes32 indexed walletId, + bytes32 indexed newPolicyHash, + uint64 newVersion + ); + + event PolicyDeactivated(bytes32 indexed walletId); + + event RuleAdded( + bytes32 indexed walletId, + bytes8 ruleId, + uint8 opcode, + uint8 resultAction + ); + + event SignerAdded(bytes32 indexed walletId, address indexed signer); + event SignerRemoved(bytes32 indexed walletId, address indexed signer); + + event WhitelistUpdated(bytes32 indexed walletId, address indexed destination, bool whitelisted); + event BlacklistUpdated(bytes32 indexed walletId, address indexed destination, bool 
blacklisted); + + event VestingConfigured( + bytes32 indexed walletId, + uint256 totalAmount, + uint256 startTime, + uint256 duration, + uint256 cliffDuration + ); + + event StreamConfigured( + bytes32 indexed walletId, + uint256 ratePerSecond, + uint256 startTime + ); + + // ============================================================ + // CONSTRUCTOR + // ============================================================ + + constructor() { + _grantRole(DEFAULT_ADMIN_ROLE, msg.sender); + _grantRole(POLICY_ADMIN_ROLE, msg.sender); + } + + // ============================================================ + // POLICY MANAGEMENT + // ============================================================ + + /** + * @notice Register a new policy for a wallet + * @param walletId The MPC wallet identifier + * @param signers Array of authorized signers + * @param requiredSignatures Number of signatures required + * @param expiresAt Policy expiration timestamp (0 = never) + */ + function registerPolicy( + bytes32 walletId, + address[] calldata signers, + uint256 requiredSignatures, + uint256 expiresAt + ) external onlyRole(POLICY_ADMIN_ROLE) { + require(signers.length >= requiredSignatures, "Invalid threshold"); + require(!policies[walletId].active, "Policy exists"); + + Policy storage policy = policies[walletId]; + policy.walletId = walletId; + policy.chainId = uint64(block.chainid); + policy.version = 1; + policy.signers = signers; + policy.requiredSignatures = requiredSignatures; + policy.createdAt = block.timestamp; + policy.expiresAt = expiresAt; + policy.active = true; + + bytes32 policyHash = computePolicyHash(walletId); + + emit PolicyRegistered(walletId, policyHash, 1, msg.sender); + } + + /** + * @notice Add a rule to a policy + */ + function addRule( + bytes32 walletId, + bytes8 ruleId, + uint8 opcode, + bytes[] calldata operands, + uint8 resultAction + ) external onlyRole(POLICY_ADMIN_ROLE) { + require(policies[walletId].active, "Policy not active"); + + Policy storage policy = 
policies[walletId]; + policy.rules.push(Rule({ + ruleId: ruleId, + opcode: opcode, + operands: operands, + resultAction: resultAction, + enabled: true + })); + policy.version++; + + emit RuleAdded(walletId, ruleId, opcode, resultAction); + emit PolicyUpdated(walletId, computePolicyHash(walletId), policy.version); + } + + /** + * @notice Configure amount limit rule + */ + function setAmountLimit( + bytes32 walletId, + uint256 maxAmount, + uint8 resultAction + ) external onlyRole(POLICY_ADMIN_ROLE) { + bytes[] memory operands = new bytes[](1); + operands[0] = abi.encodePacked(maxAmount); + + Policy storage policy = policies[walletId]; + policy.rules.push(Rule({ + ruleId: bytes8(keccak256(abi.encodePacked("AMOUNT_LIMIT", block.timestamp))), + opcode: OP_CHECK_AMOUNT_GT, + operands: operands, + resultAction: resultAction, + enabled: true + })); + policy.version++; + } + + /** + * @notice Configure daily cumulative limit + */ + function setDailyLimit( + bytes32 walletId, + uint256 dailyLimit, + uint8 resultAction + ) external onlyRole(POLICY_ADMIN_ROLE) { + bytes[] memory operands = new bytes[](1); + operands[0] = abi.encodePacked(dailyLimit); + + Policy storage policy = policies[walletId]; + policy.rules.push(Rule({ + ruleId: bytes8(keccak256(abi.encodePacked("DAILY_LIMIT", block.timestamp))), + opcode: OP_CHECK_CUMULATIVE, + operands: operands, + resultAction: resultAction, + enabled: true + })); + policy.version++; + } + + /** + * @notice Configure time lock (no transactions before unlock time) + */ + function setTimeLock( + bytes32 walletId, + uint256 unlockTime, + uint8 resultAction + ) external onlyRole(POLICY_ADMIN_ROLE) { + bytes[] memory operands = new bytes[](1); + operands[0] = abi.encodePacked(uint64(unlockTime)); + + Policy storage policy = policies[walletId]; + policy.rules.push(Rule({ + ruleId: bytes8(keccak256(abi.encodePacked("TIME_LOCK", block.timestamp))), + opcode: OP_CHECK_TIME_LOCK, + operands: operands, + resultAction: resultAction, + enabled: 
true + })); + policy.version++; + } + + /** + * @notice Configure cooldown period between transactions + */ + function setCooldown( + bytes32 walletId, + uint256 cooldownSeconds, + uint8 resultAction + ) external onlyRole(POLICY_ADMIN_ROLE) { + bytes[] memory operands = new bytes[](1); + operands[0] = abi.encodePacked(uint64(cooldownSeconds)); + + Policy storage policy = policies[walletId]; + policy.rules.push(Rule({ + ruleId: bytes8(keccak256(abi.encodePacked("COOLDOWN", block.timestamp))), + opcode: OP_CHECK_COOLDOWN, + operands: operands, + resultAction: resultAction, + enabled: true + })); + policy.version++; + } + + // ============================================================ + // VESTING & STREAMING + // ============================================================ + + /** + * @notice Configure vesting schedule for a wallet + */ + function configureVesting( + bytes32 walletId, + uint256 totalAmount, + uint256 startTime, + uint256 duration, + uint256 cliffDuration + ) external onlyRole(POLICY_ADMIN_ROLE) { + require(policies[walletId].active, "Policy not active"); + require(duration > 0, "Duration must be > 0"); + require(cliffDuration <= duration, "Cliff > duration"); + + vestingSchedules[walletId] = VestingSchedule({ + totalAmount: totalAmount, + startTime: startTime, + duration: duration, + cliffDuration: cliffDuration, + released: 0 + }); + + // Add vesting rule + bytes[] memory operands = new bytes[](4); + operands[0] = abi.encodePacked(totalAmount); + operands[1] = abi.encodePacked(uint64(startTime)); + operands[2] = abi.encodePacked(uint64(duration)); + operands[3] = abi.encodePacked(uint64(cliffDuration)); + + Policy storage policy = policies[walletId]; + policy.rules.push(Rule({ + ruleId: bytes8(keccak256(abi.encodePacked("VESTING", block.timestamp))), + opcode: OP_CHECK_VESTING, + operands: operands, + resultAction: RESULT_PARTIAL_UNLOCK, + enabled: true + })); + policy.version++; + + emit VestingConfigured(walletId, totalAmount, startTime, 
duration, cliffDuration); + } + + /** + * @notice Configure streaming payments + */ + function configureStream( + bytes32 walletId, + uint256 ratePerSecond, + uint256 startTime + ) external onlyRole(POLICY_ADMIN_ROLE) { + require(policies[walletId].active, "Policy not active"); + require(ratePerSecond > 0, "Rate must be > 0"); + + streamConfigs[walletId] = StreamConfig({ + ratePerSecond: ratePerSecond, + startTime: startTime, + totalStreamed: 0 + }); + + // Add streaming rule + bytes[] memory operands = new bytes[](2); + operands[0] = abi.encodePacked(ratePerSecond); + operands[1] = abi.encodePacked(uint64(startTime)); + + Policy storage policy = policies[walletId]; + policy.rules.push(Rule({ + ruleId: bytes8(keccak256(abi.encodePacked("STREAM", block.timestamp))), + opcode: OP_CHECK_STREAM_RATE, + operands: operands, + resultAction: RESULT_PARTIAL_UNLOCK, + enabled: true + })); + policy.version++; + + emit StreamConfigured(walletId, ratePerSecond, startTime); + } + + /** + * @notice Calculate vested amount available + */ + function vestedAmount(bytes32 walletId) public view returns (uint256) { + VestingSchedule storage schedule = vestingSchedules[walletId]; + if (schedule.totalAmount == 0) return 0; + if (block.timestamp < schedule.startTime + schedule.cliffDuration) return 0; + + uint256 elapsed = block.timestamp - schedule.startTime; + if (elapsed >= schedule.duration) { + return schedule.totalAmount - schedule.released; + } + + uint256 vested = (schedule.totalAmount * elapsed) / schedule.duration; + return vested - schedule.released; + } + + /** + * @notice Calculate streamable amount available + */ + function streamableAmount(bytes32 walletId) public view returns (uint256) { + StreamConfig storage config = streamConfigs[walletId]; + if (config.ratePerSecond == 0) return 0; + if (block.timestamp < config.startTime) return 0; + + uint256 elapsed = block.timestamp - config.startTime; + uint256 total = config.ratePerSecond * elapsed; + return total - 
config.totalStreamed; + } + + // ============================================================ + // WHITELIST / BLACKLIST + // ============================================================ + + function addToWhitelist(bytes32 walletId, address destination) external onlyRole(POLICY_ADMIN_ROLE) { + whitelists[walletId][destination] = true; + emit WhitelistUpdated(walletId, destination, true); + } + + function removeFromWhitelist(bytes32 walletId, address destination) external onlyRole(POLICY_ADMIN_ROLE) { + whitelists[walletId][destination] = false; + emit WhitelistUpdated(walletId, destination, false); + } + + function addToBlacklist(bytes32 walletId, address destination) external onlyRole(POLICY_ADMIN_ROLE) { + blacklists[walletId][destination] = true; + emit BlacklistUpdated(walletId, destination, true); + } + + function removeFromBlacklist(bytes32 walletId, address destination) external onlyRole(POLICY_ADMIN_ROLE) { + blacklists[walletId][destination] = false; + emit BlacklistUpdated(walletId, destination, false); + } + + // ============================================================ + // SIGNER MANAGEMENT + // ============================================================ + + function addSigner(bytes32 walletId, address signer) external onlyRole(POLICY_ADMIN_ROLE) { + require(policies[walletId].active, "Policy not active"); + policies[walletId].signers.push(signer); + policies[walletId].version++; + emit SignerAdded(walletId, signer); + } + + function removeSigner(bytes32 walletId, address signer) external onlyRole(POLICY_ADMIN_ROLE) { + require(policies[walletId].active, "Policy not active"); + Policy storage policy = policies[walletId]; + + for (uint i = 0; i < policy.signers.length; i++) { + if (policy.signers[i] == signer) { + policy.signers[i] = policy.signers[policy.signers.length - 1]; + policy.signers.pop(); + policy.version++; + emit SignerRemoved(walletId, signer); + return; + } + } + revert("Signer not found"); + } + + function 
setRequiredSignatures(bytes32 walletId, uint256 required) external onlyRole(POLICY_ADMIN_ROLE) { + require(policies[walletId].active, "Policy not active"); + require(required <= policies[walletId].signers.length, "Invalid threshold"); + policies[walletId].requiredSignatures = required; + policies[walletId].version++; + } + + // ============================================================ + // VERIFICATION (Called by MPC nodes) + // ============================================================ + + /** + * @notice Compute policy hash for verification + */ + function computePolicyHash(bytes32 walletId) public view returns (bytes32) { + Policy storage policy = policies[walletId]; + + bytes memory encoded = abi.encodePacked( + walletId, + policy.chainId, + policy.version + ); + + for (uint i = 0; i < policy.rules.length; i++) { + Rule storage rule = policy.rules[i]; + if (rule.enabled) { + encoded = abi.encodePacked( + encoded, + rule.ruleId, + rule.opcode, + rule.resultAction + ); + } + } + + return keccak256(encoded); + } + + /** + * @notice Get policy data for MPC verification + */ + function getPolicyData(bytes32 walletId) external view returns ( + bytes32 policyHash, + uint64 version, + address[] memory signers, + uint256 requiredSignatures, + uint256 expiresAt, + bool active + ) { + Policy storage policy = policies[walletId]; + return ( + computePolicyHash(walletId), + policy.version, + policy.signers, + policy.requiredSignatures, + policy.expiresAt, + policy.active + ); + } + + /** + * @notice Get all rules for a policy + */ + function getRules(bytes32 walletId) external view returns (Rule[] memory) { + return policies[walletId].rules; + } + + /** + * @notice Check if address is whitelisted + */ + function isWhitelisted(bytes32 walletId, address destination) external view returns (bool) { + return whitelists[walletId][destination]; + } + + /** + * @notice Check if address is blacklisted + */ + function isBlacklisted(bytes32 walletId, address destination) external 
view returns (bool) { + return blacklists[walletId][destination]; + } + + // ============================================================ + // STATE UPDATES (Called after MPC signing) + // ============================================================ + + /** + * @notice Record a transaction (called by bridge after successful signing) + */ + function recordTransaction( + bytes32 walletId, + uint256 amount, + address destination + ) external onlyRole(SIGNER_ROLE) { + // Reset daily cumulative if new day + if (block.timestamp / 1 days > dailyCumulativeReset[walletId]) { + dailyCumulative[walletId] = 0; + dailyCumulativeReset[walletId] = block.timestamp / 1 days; + } + + // Update cumulative + dailyCumulative[walletId] += amount; + + // Update last tx timestamp + lastTxTimestamp[walletId] = block.timestamp; + + // Update vesting released + VestingSchedule storage vesting = vestingSchedules[walletId]; + if (vesting.totalAmount > 0) { + vesting.released += amount; + } + + // Update streaming + StreamConfig storage stream = streamConfigs[walletId]; + if (stream.ratePerSecond > 0) { + stream.totalStreamed += amount; + } + } +} diff --git a/dashboard/.dockerignore b/dashboard/.dockerignore new file mode 100644 index 0000000..686ef80 --- /dev/null +++ b/dashboard/.dockerignore @@ -0,0 +1,4 @@ +.git +node_modules +.next +.env* diff --git a/dashboard/.env.example b/dashboard/.env.example new file mode 100644 index 0000000..326bd05 --- /dev/null +++ b/dashboard/.env.example @@ -0,0 +1,2 @@ +# MPC API URL +NEXT_PUBLIC_API_URL=http://localhost:8081/api/v1 diff --git a/dashboard/app/audit/page.tsx b/dashboard/app/audit/page.tsx new file mode 100644 index 0000000..ebac85c --- /dev/null +++ b/dashboard/app/audit/page.tsx @@ -0,0 +1,77 @@ +'use client' + +import { useState, useEffect } from 'react' +import { Nav } from '@/components/layout/nav' +import { api } from '@/lib/api' +import type { AuditEntry } from '@/lib/types' + +export default function AuditPage() { + const [entries, 
setEntries] = useState([]) + const [error, setError] = useState('') + + useEffect(() => { + api.listAudit().then(setEntries).catch((e) => setError(e.message)) + }, []) + + return ( + <> +