Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

plasma mode: Specify valid commitment type #10622

Merged
merged 11 commits into from
May 28, 2024
23 changes: 22 additions & 1 deletion .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -224,6 +224,12 @@ jobs:
- run:
name: Copy Plasma allocs to .devnet-plasma
command: cp -r .devnet/ .devnet-plasma/
- run:
name: Generate Generic Plasma allocs
command: DEVNET_PLASMA="true" GENERIC_PLASMA="true" make devnet-allocs
- run:
name: Copy Generic Plasma allocs to .devnet-plasma-generic
command: cp -r .devnet/ .devnet-plasma-generic/
- run:
name: Generate non-FPAC allocs
command: make devnet-allocs
Expand Down Expand Up @@ -251,6 +257,11 @@ jobs:
- ".devnet-plasma/allocs-l2.json"
- ".devnet-plasma/allocs-l2-delta.json"
- ".devnet-plasma/allocs-l2-ecotone.json"
- ".devnet-plasma-generic/allocs-l1.json"
- ".devnet-plasma-generic/addresses.json"
- ".devnet-plasma-generic/allocs-l2.json"
- ".devnet-plasma-generic/allocs-l2-delta.json"
- ".devnet-plasma-generic/allocs-l2-ecotone.json"
ajsutton marked this conversation as resolved.
Show resolved Hide resolved
- "packages/contracts-bedrock/deploy-config/devnetL1.json"
- "packages/contracts-bedrock/deployments/devnetL1"
- notify-failures-on-develop
Expand Down Expand Up @@ -1268,6 +1279,16 @@ jobs:
- run:
name: Set DEVNET_PLASMA = true
command: echo 'export DEVNET_PLASMA=true' >> $BASH_ENV
- when:
condition:
equal: ['plasma-generic', <<parameters.fpac>>]
steps:
- run:
name: Set DEVNET_PLASMA = true
command: echo 'export DEVNET_PLASMA=true' >> $BASH_ENV
- run:
name: Set GENERIC_PLASMA = true
command: echo 'export GENERIC_PLASMA=true' >> $BASH_ENV
- check-changed:
patterns: op-(.+),packages,ops-bedrock,bedrock-devnet
- run:
Expand Down Expand Up @@ -1876,7 +1897,7 @@ workflows:
- devnet:
matrix:
parameters:
fpac: ["legacy", "fault-proofs", "plasma"]
fpac: ["legacy", "fault-proofs", "plasma", "plasma-generic"]
requires:
- pnpm-monorepo
- op-batcher-docker-build
Expand Down
13 changes: 11 additions & 2 deletions bedrock-devnet/devnet/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
DEVNET_NO_BUILD = os.getenv('DEVNET_NO_BUILD') == "true"
DEVNET_FPAC = os.getenv('DEVNET_FPAC') == "true"
DEVNET_PLASMA = os.getenv('DEVNET_PLASMA') == "true"
GENERIC_PLASMA = os.getenv('GENERIC_PLASMA') == "true"

class Bunch:
def __init__(self, **kwds):
Expand Down Expand Up @@ -135,6 +136,8 @@ def init_devnet_l1_deploy_config(paths, update_timestamp=False):
deploy_config['faultGameWithdrawalDelay'] = 0
if DEVNET_PLASMA:
deploy_config['usePlasma'] = True
if GENERIC_PLASMA:
deploy_config['daCommitmentType'] = 1
write_json(paths.devnet_config_path, deploy_config)

def devnet_l1_allocs(paths):
Expand Down Expand Up @@ -273,11 +276,17 @@ def devnet_deploy(paths):

if DEVNET_PLASMA:
docker_env['PLASMA_ENABLED'] = 'true'
docker_env['PLASMA_DA_SERVICE'] = 'false'
else:
docker_env['PLASMA_ENABLED'] = 'false'

if GENERIC_PLASMA:
docker_env['PLASMA_GENERIC_DA'] = 'true'
docker_env['PLASMA_DA_SERVICE'] = 'true'
else:
docker_env['PLASMA_GENERIC_DA'] = 'false'
trianglesphere marked this conversation as resolved.
Show resolved Hide resolved
docker_env['PLASMA_DA_SERVICE'] = 'false'


# Bring up the rest of the services.
log.info('Bringing up `op-node`, `op-proposer` and `op-batcher`.')
run_command(['docker', 'compose', 'up', '-d', 'op-node', 'op-proposer', 'op-batcher', 'artifact-server'], cwd=paths.ops_bedrock_dir, env=docker_env)
Expand All @@ -287,7 +296,7 @@ def devnet_deploy(paths):
log.info('Bringing up `op-challenger`.')
run_command(['docker', 'compose', 'up', '-d', 'op-challenger'], cwd=paths.ops_bedrock_dir, env=docker_env)

# Optionally bring up OP Plasma.
# Optionally bring up Plasma Mode components.
if DEVNET_PLASMA:
log.info('Bringing up `da-server`, `sentinel`.') # TODO(10141): We don't have public sentinel images yet
run_command(['docker', 'compose', 'up', '-d', 'da-server'], cwd=paths.ops_bedrock_dir, env=docker_env)
Expand Down
11 changes: 9 additions & 2 deletions op-chain-ops/genesis/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -271,6 +271,8 @@ type DeployConfig struct {
CustomGasTokenAddress common.Address `json:"customGasTokenAddress"`
// UsePlasma is a flag that indicates if the system is using op-plasma
UsePlasma bool `json:"usePlasma"`
// DACommitmentType specifies the allowed commitment
DACommitmentType uint64 `json:"daCommitmentType"`
ajsutton marked this conversation as resolved.
Show resolved Hide resolved
// DAChallengeWindow represents the block interval during which the availability of a data commitment can be challenged.
DAChallengeWindow uint64 `json:"daChallengeWindow"`
// DAResolveWindow represents the block interval during which a data availability challenge can be resolved.
Expand Down Expand Up @@ -443,6 +445,9 @@ func (d *DeployConfig) Check() error {
if d.DAResolveWindow == 0 {
return fmt.Errorf("%w: DAResolveWindow cannot be 0 when using plasma mode", ErrInvalidDeployConfig)
}
if !(d.DACommitmentType == 0 || d.DACommitmentType == 1) {
trianglesphere marked this conversation as resolved.
Show resolved Hide resolved
return fmt.Errorf("%w: DACommitmentType must be either 0 (keccak) or 1 (generic commitment)", ErrInvalidDeployConfig)
}
}
if d.UseCustomGasToken {
if d.CustomGasTokenAddress == (common.Address{}) {
Expand Down Expand Up @@ -513,9 +518,10 @@ func (d *DeployConfig) CheckAddresses() error {
if d.OptimismPortalProxy == (common.Address{}) {
return fmt.Errorf("%w: OptimismPortalProxy cannot be address(0)", ErrInvalidDeployConfig)
}
if d.UsePlasma && d.DAChallengeProxy == (common.Address{}) {
if d.UsePlasma && d.DACommitmentType == 0 && d.DAChallengeProxy == (common.Address{}) {
return fmt.Errorf("%w: DAChallengeContract cannot be address(0) when using plasma mode", ErrInvalidDeployConfig)

} else if d.UsePlasma && d.DACommitmentType == 1 && d.DAChallengeProxy != (common.Address{}) {
return fmt.Errorf("%w: DAChallengeContract must be address(0) when using generic commitments in plasma mode", ErrInvalidDeployConfig)
ajsutton marked this conversation as resolved.
Show resolved Hide resolved
}
trianglesphere marked this conversation as resolved.
Show resolved Hide resolved
return nil
}
Expand Down Expand Up @@ -612,6 +618,7 @@ func (d *DeployConfig) RollupConfig(l1StartBlock *types.Block, l2GenesisBlockHas
var plasma *rollup.PlasmaConfig
if d.UsePlasma {
plasma = &rollup.PlasmaConfig{
CommitmentType: d.DACommitmentType,
DAChallengeAddress: d.DAChallengeProxy,
DAChallengeWindow: d.DAChallengeWindow,
DAResolveWindow: d.DAResolveWindow,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,7 @@
"useFaultProofs": false,
"usePlasma": false,
"daBondSize": 0,
"daCommitmentType": 0,
"daChallengeProxy": "0x0000000000000000000000000000000000000000",
"daChallengeWindow": 0,
"daResolveWindow": 0,
Expand Down
2 changes: 1 addition & 1 deletion op-node/rollup/derive/plasma_data_source.go
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ func (s *PlasmaDataSource) Next(ctx context.Context) (eth.Data, error) {
return nil, NewTemporaryError(fmt.Errorf("failed to fetch input data with comm %x from da service: %w", s.comm, err))
}
// inputs are limited to a max size to ensure they can be challenged in the DA contract.
if len(data) > plasma.MaxInputSize {
if s.comm.CommitmentType() == plasma.Keccak256CommitmentType && len(data) > plasma.MaxInputSize {
s.log.Warn("input data exceeds max size", "size", len(data), "max", plasma.MaxInputSize)
s.comm = nil
return s.Next(ctx)
Expand Down
23 changes: 20 additions & 3 deletions op-node/rollup/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,8 @@ type Genesis struct {
type PlasmaConfig struct {
// L1 DataAvailabilityChallenge contract proxy address
DAChallengeAddress common.Address `json:"da_challenge_contract_address,omitempty"`
// CommitmentType specifies which commitment type can be used. Defaults to Keccak (type 0) if not present
CommitmentType uint64 `json:"da_commitment_type"`
ajsutton marked this conversation as resolved.
Show resolved Hide resolved
// DA challenge window value set on the DAC contract. Used in plasma mode
// to compute when a commitment can no longer be challenged.
DAChallengeWindow uint64 `json:"da_challenge_window"`
Expand Down Expand Up @@ -345,6 +347,18 @@ func validatePlasmaConfig(cfg *Config) error {
if cfg.LegacyDAResolveWindow != cfg.PlasmaConfig.DAResolveWindow {
return fmt.Errorf("LegacyDAResolveWindow (%v) != PlasmaConfig.DAResolveWindow (%v)", cfg.LegacyDAResolveWindow, cfg.PlasmaConfig.DAResolveWindow)
}
if cfg.PlasmaConfig.CommitmentType != 0 {
return errors.New("Cannot set CommitmentType with the legacy config")
}
} else if cfg.PlasmaConfig != nil {
if !(cfg.PlasmaConfig.CommitmentType == 0 || cfg.PlasmaConfig.CommitmentType == 1) {
return fmt.Errorf("invalid commitment type: %v", cfg.PlasmaConfig.CommitmentType)
}
if cfg.PlasmaConfig.CommitmentType == 0 && cfg.PlasmaConfig.DAChallengeAddress == (common.Address{}) {
return errors.New("Must set da_challenge_contract_address for keccak commitments")
} else if cfg.PlasmaConfig.CommitmentType == 1 && cfg.PlasmaConfig.DAChallengeAddress != (common.Address{}) {
return errors.New("Must set empty da_challenge_contract_address for generic commitments")
}
ajsutton marked this conversation as resolved.
Show resolved Hide resolved
}
return nil
}
Expand Down Expand Up @@ -485,9 +499,6 @@ func (c *Config) GetOPPlasmaConfig() (plasma.Config, error) {
if c.PlasmaConfig == nil {
return plasma.Config{}, errors.New("no plasma config")
}
if c.PlasmaConfig.DAChallengeAddress == (common.Address{}) {
return plasma.Config{}, errors.New("missing DAChallengeAddress")
}
if c.PlasmaConfig.DAChallengeWindow == uint64(0) {
return plasma.Config{}, errors.New("missing DAChallengeWindow")
}
Expand All @@ -498,6 +509,7 @@ func (c *Config) GetOPPlasmaConfig() (plasma.Config, error) {
DAChallengeContractAddress: c.PlasmaConfig.DAChallengeAddress,
ChallengeWindow: c.PlasmaConfig.DAChallengeWindow,
ResolveWindow: c.PlasmaConfig.DAResolveWindow,
CommitmentType: plasma.CommitmentType(c.PlasmaConfig.CommitmentType),
}, nil
}

Expand Down Expand Up @@ -550,6 +562,9 @@ func (c *Config) Description(l2Chains map[string]string) string {
banner += fmt.Sprintf(" - Interop: %s\n", fmtForkTimeOrUnset(c.InteropTime))
// Report the protocol version
banner += fmt.Sprintf("Node supports up to OP-Stack Protocol Version: %s\n", OPStackSupport)
if c.PlasmaConfig != nil {
banner += fmt.Sprintf("Node supports Plasma Mode with CommitmentType %v\n", c.PlasmaConfig.CommitmentType)
}
return banner
}

Expand All @@ -569,6 +584,7 @@ func (c *Config) LogDescription(log log.Logger, l2Chains map[string]string) {
if networkL1 == "" {
networkL1 = "unknown L1"
}

log.Info("Rollup Config", "l2_chain_id", c.L2ChainID, "l2_network", networkL2, "l1_chain_id", c.L1ChainID,
"l1_network", networkL1, "l2_start_time", c.Genesis.L2Time, "l2_block_hash", c.Genesis.L2.Hash.String(),
"l2_block_number", c.Genesis.L2.Number, "l1_block_hash", c.Genesis.L1.Hash.String(),
Expand All @@ -578,6 +594,7 @@ func (c *Config) LogDescription(log log.Logger, l2Chains map[string]string) {
"ecotone_time", fmtForkTimeOrUnset(c.EcotoneTime),
"fjord_time", fmtForkTimeOrUnset(c.FjordTime),
"interop_time", fmtForkTimeOrUnset(c.InteropTime),
"plasma_mode", c.PlasmaConfig != nil,
)
}

Expand Down
2 changes: 1 addition & 1 deletion op-plasma/cmd/daserver/entrypoint.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ func StartDAServer(cliCtx *cli.Context) error {
store = s3
}

server := plasma.NewDAServer(cliCtx.String(ListenAddrFlagName), cliCtx.Int(PortFlagName), store, l)
server := plasma.NewDAServer(cliCtx.String(ListenAddrFlagName), cliCtx.Int(PortFlagName), store, l, cfg.UseGenericComm)

if err := server.Start(); err != nil {
return fmt.Errorf("failed to start the DA server")
Expand Down
10 changes: 10 additions & 0 deletions op-plasma/cmd/daserver/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ const (
S3AccessKeyIDFlagName = "s3.access-key-id"
S3AccessKeySecretFlagName = "s3.access-key-secret"
FileStorePathFlagName = "file.path"
GenericCommFlagName = "generic-commitment"
ajsutton marked this conversation as resolved.
Show resolved Hide resolved
)

const EnvVarPrefix = "OP_PLASMA_DA_SERVER"
Expand All @@ -44,6 +45,12 @@ var (
Usage: "path to directory for file storage",
EnvVars: prefixEnvVars("FILESTORE_PATH"),
}
GenericCommFlag = &cli.BoolFlag{
Name: GenericCommFlagName,
Usage: "enable generic commitments for testing. Not for production use.",
EnvVars: prefixEnvVars("GENERIC_COMMITMENT"),
Value: false,
}
S3BucketFlag = &cli.StringFlag{
Name: S3BucketFlagName,
Usage: "bucket name for S3 storage",
Expand Down Expand Up @@ -80,6 +87,7 @@ var optionalFlags = []cli.Flag{
S3EndpointFlag,
S3AccessKeyIDFlag,
S3AccessKeySecretFlag,
GenericCommFlag,
}

func init() {
Expand All @@ -96,6 +104,7 @@ type CLIConfig struct {
S3Endpoint string
S3AccessKeyID string
S3AccessKeySecret string
UseGenericComm bool
}

func ReadCLIConfig(ctx *cli.Context) CLIConfig {
Expand All @@ -105,6 +114,7 @@ func ReadCLIConfig(ctx *cli.Context) CLIConfig {
S3Endpoint: ctx.String(S3EndpointFlagName),
S3AccessKeyID: ctx.String(S3AccessKeyIDFlagName),
S3AccessKeySecret: ctx.String(S3AccessKeySecretFlagName),
UseGenericComm: ctx.Bool(GenericCommFlagName),
}
}

Expand Down
2 changes: 1 addition & 1 deletion op-plasma/daclient.go
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,7 @@ func (c *DAClient) setInput(ctx context.Context, img []byte) (CommitmentData, er
return nil, err
}

comm, err := DecodeGenericCommitment(b)
comm, err := DecodeCommitmentData(b)
if err != nil {
return nil, err
}
Expand Down
4 changes: 2 additions & 2 deletions op-plasma/daclient_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ func TestDAClientPrecomputed(t *testing.T) {

ctx := context.Background()

server := NewDAServer("127.0.0.1", 0, store, logger)
server := NewDAServer("127.0.0.1", 0, store, logger, false)

require.NoError(t, server.Start())

Expand Down Expand Up @@ -108,7 +108,7 @@ func TestDAClientService(t *testing.T) {

ctx := context.Background()

server := NewDAServer("127.0.0.1", 0, store, logger)
server := NewDAServer("127.0.0.1", 0, store, logger, false)

require.NoError(t, server.Start())

Expand Down
44 changes: 31 additions & 13 deletions op-plasma/damgr.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,8 @@ type HeadSignalFn func(eth.L1BlockRef)
type Config struct {
// Required for filtering contract events
DAChallengeContractAddress common.Address
// Allowed CommitmentType
CommitmentType CommitmentType
// The number of l1 blocks after the input is committed during which one can challenge.
ChallengeWindow uint64
// The number of l1 blocks after a commitment is challenged during which one can resolve.
Expand Down Expand Up @@ -166,6 +168,10 @@ func (d *DA) Reset(ctx context.Context, base eth.L1BlockRef, baseCfg eth.SystemC
// GetInput returns the input data for the given commitment bytes. blockNumber is required to lookup
// the challenge status in the DataAvailabilityChallenge L1 contract.
func (d *DA) GetInput(ctx context.Context, l1 L1Fetcher, comm CommitmentData, blockId eth.BlockID) (eth.Data, error) {
// If it's not the right commitment type, report it as an expired commitment in order to skip it
if d.cfg.CommitmentType != comm.CommitmentType() {
return nil, fmt.Errorf("invalid commitment type; expected: %v, got: %v: %w", d.cfg.CommitmentType, comm.CommitmentType(), ErrExpiredChallenge)
}
ajsutton marked this conversation as resolved.
Show resolved Hide resolved
// If the challenge head is ahead in the case of a pipeline reset or stall, we might have synced a
// challenge event for this commitment. Otherwise we mark the commitment as part of the canonical
// chain so potential future challenge events can be selected.
Expand Down Expand Up @@ -205,6 +211,10 @@ func (d *DA) GetInput(ctx context.Context, l1 L1Fetcher, comm CommitmentData, bl
if !notFound {
return data, nil
}
// Generic commitments don't resolve from L1, so if we still can't find the data we're out of luck
if comm.CommitmentType() == GenericCommitmentType {
return nil, ErrMissingPastWindow
}
ajsutton marked this conversation as resolved.
Show resolved Hide resolved
// data not found in storage, return from challenge resolved input
resolvedInput, err := d.state.GetResolvedInput(comm.Encode())
if err != nil {
Expand Down Expand Up @@ -310,31 +320,39 @@ func (d *DA) LoadChallengeEvents(ctx context.Context, l1 L1Fetcher, block eth.Bl
d.log.Error("tx hash mismatch", "block", block.Number, "txIdx", i, "log", log.Index, "txHash", tx.Hash(), "receiptTxHash", log.TxHash)
continue
}
// Decode the input from resolver tx calldata
input, err := DecodeResolvedInput(tx.Data())
if err != nil {
d.log.Error("failed to decode resolved input", "block", block.Number, "txIdx", i, "err", err)
continue
}
if err := comm.Verify(input); err != nil {
d.log.Error("failed to verify commitment", "block", block.Number, "txIdx", i, "err", err)
continue
var input []byte
if d.cfg.CommitmentType == Keccak256CommitmentType {
// Decode the input from resolver tx calldata
input, err = DecodeResolvedInput(tx.Data())
if err != nil {
d.log.Error("failed to decode resolved input", "block", block.Number, "txIdx", i, "err", err)
continue
}
if err := comm.Verify(input); err != nil {
d.log.Error("failed to verify commitment", "block", block.Number, "txIdx", i, "err", err)
continue
}
}
d.log.Debug("challenge resolved", "block", block, "txIdx", i)
d.log.Info("challenge resolved", "block", block, "txIdx", i, "comm", comm.Encode())
d.state.SetResolvedChallenge(comm.Encode(), input, log.BlockNumber)
case ChallengeActive:
d.log.Info("detected new active challenge", "block", block)
d.log.Info("detected new active challenge", "block", block, "comm", comm.Encode())
d.state.SetActiveChallenge(comm.Encode(), log.BlockNumber, d.cfg.ResolveWindow)
default:
d.log.Warn("skipping unknown challenge status", "block", block.Number, "tx", i, "log", log.Index, "status", status)
d.log.Warn("skipping unknown challenge status", "block", block.Number, "tx", i, "log", log.Index, "status", status, "comm", comm.Encode())
}
}
return nil
}

// fetchChallengeLogs returns logs for challenge events if any for the given block
func (d *DA) fetchChallengeLogs(ctx context.Context, l1 L1Fetcher, block eth.BlockID) ([]*types.Log, error) { //cached with deposits events call so not expensive
func (d *DA) fetchChallengeLogs(ctx context.Context, l1 L1Fetcher, block eth.BlockID) ([]*types.Log, error) {
var logs []*types.Log
// Don't look at the challenge contract if there is no challenge contract.
if d.cfg.CommitmentType == GenericCommitmentType {
return logs, nil
}
//cached with deposits events call so not expensive
_, receipts, err := l1.FetchReceipts(ctx, block.Hash)
if err != nil {
return nil, err
Expand Down
Loading