Skip to content

Commit

Permalink
Merge pull request #8557 from ethereum-optimism/revert-8191-prefetching_ethclient
Browse files Browse the repository at this point in the history

Revert "Add `PrefetchingEthClient`, which builds a cache of ethclient data for callers."
  • Loading branch information
sebastianst authored Dec 11, 2023
2 parents 81ac209 + 8478fa6 commit 10425c1
Show file tree
Hide file tree
Showing 7 changed files with 10 additions and 434 deletions.
14 changes: 0 additions & 14 deletions op-node/flags/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -124,18 +124,6 @@ var (
EnvVars: prefixEnvVars("L1_HTTP_POLL_INTERVAL"),
Value: time.Second * 12,
}
L1PrefetchingWindow = &cli.Uint64Flag{
Name: "l1.prefetching-window",
Usage: "Number of L1 blocks to prefetch in the background. Disabled if 0.",
EnvVars: prefixEnvVars("L1_PREFETCHING_WINDOW"),
Value: 0,
}
L1PrefetchingTimeout = &cli.DurationFlag{
Name: "l1.prefetching-timeout",
Usage: "Timeout for L1 prefetching. Disabled if 0.",
EnvVars: prefixEnvVars("L1_PREFETCHING_TIMEOUT"),
Value: time.Second * 30,
}
L2EngineJWTSecret = &cli.StringFlag{
Name: "l2.jwt-secret",
Usage: "Path to JWT secret key. Keys are 32 bytes, hex encoded in a file. A new key will be generated if left empty.",
Expand Down Expand Up @@ -321,8 +309,6 @@ var optionalFlags = []cli.Flag{
L1RPCMaxBatchSize,
L1RPCMaxConcurrency,
L1HTTPPollInterval,
L1PrefetchingWindow,
L1PrefetchingTimeout,
L2EngineJWTSecret,
VerifierL1Confs,
SequencerEnabledFlag,
Expand Down
10 changes: 0 additions & 10 deletions op-node/node/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -159,14 +159,6 @@ type L1EndpointConfig struct {
// It is recommended to use websockets or IPC for efficient following of the changing block.
// Setting this to 0 disables polling.
HttpPollInterval time.Duration

// PrefetchingWindow specifies the number of blocks to prefetch from the L1 RPC.
// Setting this to 0 disables prefetching.
PrefetchingWindow uint64

// PrefetchingTimeout specifies the timeout for prefetching from the L1 RPC.
// Setting this to 0 disables prefetching.
PrefetchingTimeout time.Duration
}

var _ L1EndpointSetup = (*L1EndpointConfig)(nil)
Expand Down Expand Up @@ -200,8 +192,6 @@ func (cfg *L1EndpointConfig) Setup(ctx context.Context, log log.Logger, rollupCf
rpcCfg := sources.L1ClientDefaultConfig(rollupCfg, cfg.L1TrustRPC, cfg.L1RPCKind)
rpcCfg.MaxRequestsPerBatch = cfg.BatchSize
rpcCfg.MaxConcurrentRequests = cfg.MaxConcurrency
rpcCfg.PrefetchingWindow = cfg.PrefetchingWindow
rpcCfg.PrefetchingTimeout = cfg.PrefetchingTimeout
return l1Node, rpcCfg, nil
}

Expand Down
16 changes: 7 additions & 9 deletions op-node/service.go
Original file line number Diff line number Diff line change
Expand Up @@ -123,15 +123,13 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {

func NewL1EndpointConfig(ctx *cli.Context) *node.L1EndpointConfig {
return &node.L1EndpointConfig{
L1NodeAddr: ctx.String(flags.L1NodeAddr.Name),
L1TrustRPC: ctx.Bool(flags.L1TrustRPC.Name),
L1RPCKind: sources.RPCProviderKind(strings.ToLower(ctx.String(flags.L1RPCProviderKind.Name))),
PrefetchingWindow: ctx.Uint64(flags.L1PrefetchingWindow.Name),
PrefetchingTimeout: ctx.Duration(flags.L1PrefetchingTimeout.Name),
RateLimit: ctx.Float64(flags.L1RPCRateLimit.Name),
BatchSize: ctx.Int(flags.L1RPCMaxBatchSize.Name),
HttpPollInterval: ctx.Duration(flags.L1HTTPPollInterval.Name),
MaxConcurrency: ctx.Int(flags.L1RPCMaxConcurrency.Name),
L1NodeAddr: ctx.String(flags.L1NodeAddr.Name),
L1TrustRPC: ctx.Bool(flags.L1TrustRPC.Name),
L1RPCKind: sources.RPCProviderKind(strings.ToLower(ctx.String(flags.L1RPCProviderKind.Name))),
RateLimit: ctx.Float64(flags.L1RPCRateLimit.Name),
BatchSize: ctx.Int(flags.L1RPCMaxBatchSize.Name),
HttpPollInterval: ctx.Duration(flags.L1HTTPPollInterval.Name),
MaxConcurrency: ctx.Int(flags.L1RPCMaxConcurrency.Name),
}
}

Expand Down
23 changes: 3 additions & 20 deletions op-service/sources/l1_client.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,6 @@ type L1ClientConfig struct {
EthClientConfig

L1BlockRefsCacheSize int
PrefetchingWindow uint64
PrefetchingTimeout time.Duration
}

func L1ClientDefaultConfig(config *rollup.Config, trustRPC bool, kind RPCProviderKind) *L1ClientConfig {
Expand All @@ -47,16 +45,14 @@ func L1ClientDefaultConfig(config *rollup.Config, trustRPC bool, kind RPCProvide
},
// Not bounded by span, to cover find-sync-start range fully for speedy recovery after errors.
L1BlockRefsCacheSize: fullSpan,
PrefetchingWindow: 0, // no prefetching by default
PrefetchingTimeout: 0, // no prefetching by default
}
}

// L1Client provides typed bindings to retrieve L1 data from an RPC source,
// with optimized batch requests, cached results, and flag to not trust the RPC
// (i.e. to verify all returned contents against corresponding block hashes).
type L1Client struct {
EthClientInterface
*EthClient

// cache L1BlockRef by hash
// common.Hash -> eth.L1BlockRef
Expand All @@ -70,23 +66,10 @@ func NewL1Client(client client.RPC, log log.Logger, metrics caching.Metrics, con
return nil, err
}

var clientToUse EthClientInterface

if config.PrefetchingTimeout > 0 && config.PrefetchingWindow > 0 {
prefetchingEthClient, err := NewPrefetchingEthClient(ethClient, config.PrefetchingWindow, config.PrefetchingTimeout)
if err != nil {
return nil, err
}
clientToUse = prefetchingEthClient
} else {
clientToUse = ethClient
}

return &L1Client{
EthClientInterface: clientToUse,
l1BlockRefsCache: caching.NewLRUCache[common.Hash, eth.L1BlockRef](metrics, "blockrefs", config.L1BlockRefsCacheSize),
EthClient: ethClient,
l1BlockRefsCache: caching.NewLRUCache[common.Hash, eth.L1BlockRef](metrics, "blockrefs", config.L1BlockRefsCacheSize),
}, nil

}

// L1BlockRefByLabel returns the [eth.L1BlockRef] for the given block label.
Expand Down
Loading

0 comments on commit 10425c1

Please sign in to comment.