/
endpoints_debug.go
469 lines (408 loc) · 15.9 KB
/
endpoints_debug.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
package jsonrpc
import (
"context"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"net/http"
"sort"
"strings"
"sync"
"time"
"github.com/hoaleee/go-ethereum/common"
ethTypes "github.com/hoaleee/go-ethereum/core/types"
"github.com/hoaleee/zkevm-node/jsonrpc/types"
"github.com/hoaleee/zkevm-node/log"
"github.com/hoaleee/zkevm-node/state"
"github.com/hoaleee/zkevm-node/state/runtime/fakevm"
"github.com/hoaleee/zkevm-node/state/runtime/instrumentation"
"github.com/jackc/pgx/v4"
)
// defaultTraceConfig is applied when the caller does not provide a trace
// configuration: storage and stack capture stay enabled, memory and return
// data capture are disabled, and no custom tracer is used.
var defaultTraceConfig = &traceConfig{
	DisableStorage:   false,
	DisableStack:     false,
	EnableMemory:     false,
	EnableReturnData: false,
	Tracer:           nil,
}
// DebugEndpoints is the debug jsonrpc endpoint
type DebugEndpoints struct {
	cfg      Config                 // jsonrpc server configuration (e.g. ReadTimeout used by TraceBatchByNumber)
	state    types.StateInterface   // access to L2 state: blocks, batches, txs, receipts, debug traces
	etherman types.EthermanInterface // access to L1 used to resolve symbolic block/batch numbers
	txMan    DBTxManager            // wraps handlers in a DB transaction scope
}
// NewDebugEndpoints returns DebugEndpoints
func NewDebugEndpoints(cfg Config, state types.StateInterface, etherman types.EthermanInterface) *DebugEndpoints {
	endpoints := &DebugEndpoints{}
	endpoints.cfg = cfg
	endpoints.state = state
	endpoints.etherman = etherman
	return endpoints
}
// traceConfig mirrors the geth debug tracer options accepted by the
// debug_trace* endpoints; a nil value means defaultTraceConfig is used.
type traceConfig struct {
	DisableStorage   bool            `json:"disableStorage"`
	DisableStack     bool            `json:"disableStack"`
	EnableMemory     bool            `json:"enableMemory"`
	EnableReturnData bool            `json:"enableReturnData"`
	Tracer           *string         `json:"tracer"`       // built-in tracer name or custom JS tracer source
	TracerConfig     json.RawMessage `json:"tracerConfig"` // decoding deferred; forwarded verbatim to the state
}
// StructLogRes represents the debug trace information for each opcode
type StructLogRes struct {
	Pc            uint64             `json:"pc"`
	Op            string             `json:"op"`
	Gas           uint64             `json:"gas"`
	GasCost       uint64             `json:"gasCost"`
	Depth         int                `json:"depth"`
	Error         string             `json:"error,omitempty"`
	// Stack, Memory and Storage are pointers so the field is omitted
	// entirely from the JSON output when its capture is disabled.
	Stack         *[]types.ArgBig    `json:"stack,omitempty"`
	Memory        *[]string          `json:"memory,omitempty"`
	Storage       *map[string]string `json:"storage,omitempty"`
	RefundCounter uint64             `json:"refund,omitempty"`
}
// traceTransactionResponse is the default (no custom tracer) response
// shape for debug_traceTransaction.
type traceTransactionResponse struct {
	Gas         uint64         `json:"gas"`
	Failed      bool           `json:"failed"`
	ReturnValue interface{}    `json:"returnValue"`
	StructLogs  []StructLogRes `json:"structLogs"`
}
// traceBlockTransactionResponse wraps a single tx trace inside a
// debug_traceBlockBy* response.
type traceBlockTransactionResponse struct {
	Result interface{} `json:"result"`
}
// traceBatchTransactionResponse wraps a single tx trace, keyed by tx hash,
// inside a debug_traceBatchByNumber response.
type traceBatchTransactionResponse struct {
	TxHash common.Hash `json:"txHash"`
	Result interface{} `json:"result"`
}
// TraceTransaction creates a response for debug_traceTransaction request.
// See https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-debug#debugtracetransaction
func (d *DebugEndpoints) TraceTransaction(hash types.ArgHash, cfg *traceConfig) (interface{}, types.Error) {
	// run the trace inside a DB transaction scope managed by txMan
	trace := func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) {
		return d.buildTraceTransaction(ctx, hash.Hash(), cfg, dbTx)
	}
	return d.txMan.NewDbTxScope(d.state, trace)
}
// TraceBlockByNumber creates a response for debug_traceBlockByNumber request.
// See https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-debug#debugtraceblockbynumber
func (d *DebugEndpoints) TraceBlockByNumber(number types.BlockNumber, cfg *traceConfig) (interface{}, types.Error) {
	return d.txMan.NewDbTxScope(d.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) {
		// resolve symbolic block numbers (latest, pending, ...) to a concrete value
		blockNumber, rpcErr := number.GetNumericBlockNumber(ctx, d.state, d.etherman, dbTx)
		if rpcErr != nil {
			return nil, rpcErr
		}

		l2Block, err := d.state.GetL2BlockByNumber(ctx, blockNumber, dbTx)
		switch {
		case errors.Is(err, state.ErrNotFound):
			return nil, types.NewRPCError(types.DefaultErrorCode, fmt.Sprintf("block #%d not found", blockNumber))
		case err != nil:
			return RPCErrorResponse(types.DefaultErrorCode, "failed to get block by number", err, true)
		}

		// trace every transaction of the block, preserving tx order
		results, rpcErr := d.buildTraceBlock(ctx, l2Block.Transactions(), cfg, dbTx)
		if rpcErr != nil {
			return nil, rpcErr
		}

		return results, nil
	})
}
// TraceBlockByHash creates a response for debug_traceBlockByHash request.
// See https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-debug#debugtraceblockbyhash
func (d *DebugEndpoints) TraceBlockByHash(hash types.ArgHash, cfg *traceConfig) (interface{}, types.Error) {
	return d.txMan.NewDbTxScope(d.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) {
		l2Block, err := d.state.GetL2BlockByHash(ctx, hash.Hash(), dbTx)
		switch {
		case errors.Is(err, state.ErrNotFound):
			return nil, types.NewRPCError(types.DefaultErrorCode, fmt.Sprintf("block %s not found", hash.Hash().String()))
		case err != nil:
			return RPCErrorResponse(types.DefaultErrorCode, "failed to get block by hash", err, true)
		}

		// trace every transaction of the block, preserving tx order
		results, rpcErr := d.buildTraceBlock(ctx, l2Block.Transactions(), cfg, dbTx)
		if rpcErr != nil {
			return nil, rpcErr
		}

		return results, nil
	})
}
// TraceBatchByNumber creates a response for debug_traceBatchByNumber request.
// this endpoint tries to help clients to get traces at once for all the transactions
// attached to the same batch.
//
// IMPORTANT: in order to take advantage of the infrastructure automatically scaling,
// instead of parallelizing the trace transaction internally and pushing all the load
// to a single jRPC and Executor instance, the code will redirect the trace transaction
// requests to the same url, making them external calls, so we can process in parallel
// with multiple jRPC and Executor instances.
//
// the request flow will work as follows:
// -> user do a trace batch request
// -> jRPC balancer picks a jRPC server to handle the trace batch request
// -> picked jRPC sends parallel trace transaction requests for each transaction in the batch
// -> jRPC balancer sends each request to a different jRPC to handle the trace transaction requests
// -> picked jRPC server group trace transaction responses from other jRPC servers
// -> picked jRPC respond the initial request to the user with all the tx traces
func (d *DebugEndpoints) TraceBatchByNumber(httpRequest *http.Request, number types.BatchNumber, cfg *traceConfig) (interface{}, types.Error) {
	// traceResponse carries the result (or error) of tracing a single tx,
	// plus the keys needed to sort the responses deterministically.
	type traceResponse struct {
		blockNumber uint64
		txIndex     uint64
		txHash      common.Hash
		trace       interface{}
		err         error
	}

	// bufferSize bounds both the request channel buffer and the number of
	// worker goroutines, i.e. how many txs are traced in parallel.
	const bufferSize = 10

	return d.txMan.NewDbTxScope(d.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) {
		batchNumber, rpcErr := number.GetNumericBatchNumber(ctx, d.state, d.etherman, dbTx)
		if rpcErr != nil {
			return nil, rpcErr
		}

		batch, err := d.state.GetBatchByNumber(ctx, batchNumber, dbTx)
		if errors.Is(err, state.ErrNotFound) {
			return nil, types.NewRPCError(types.DefaultErrorCode, fmt.Sprintf("batch #%d not found", batchNumber))
		} else if err != nil {
			return RPCErrorResponse(types.DefaultErrorCode, "failed to get batch by number", err, true)
		}

		// ErrNotFound is tolerated here: a batch without txs simply produces
		// an empty trace list.
		txs, _, err := d.state.GetTransactionsByBatchNumber(ctx, batch.BatchNumber, dbTx)
		if !errors.Is(err, state.ErrNotFound) && err != nil {
			return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load batch txs from state by number %v to create the traces", batchNumber), err, true)
		}

		// load the receipts up front; they provide the block number and tx
		// index used later to sort the traces.
		receipts := make([]ethTypes.Receipt, 0, len(txs))
		for _, tx := range txs {
			receipt, err := d.state.GetTransactionReceipt(ctx, tx.Hash(), dbTx)
			if err != nil {
				return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load receipt for tx %v to get trace", tx.Hash().String()), err, true)
			}
			receipts = append(receipts, *receipt)
		}

		requests := make(chan ethTypes.Receipt, bufferSize)

		mu := &sync.Mutex{}
		wg := sync.WaitGroup{}
		wg.Add(len(receipts))
		responses := make([]traceResponse, 0, len(receipts))

		// gets the trace from the jRPC and adds it to the responses
		loadTraceByTxHash := func(d *DebugEndpoints, receipt ethTypes.Receipt, cfg *traceConfig) {
			response := traceResponse{
				blockNumber: receipt.BlockNumber.Uint64(),
				txIndex:     uint64(receipt.TransactionIndex),
				txHash:      receipt.TxHash,
			}
			defer wg.Done()

			trace, err := d.TraceTransaction(types.ArgHash(receipt.TxHash), cfg)
			if err != nil {
				err := fmt.Errorf("failed to get tx trace for tx %v, err: %w", receipt.TxHash.String(), err)
				log.Errorf(err.Error())
				response.err = err
			} else {
				response.trace = trace
			}

			// add to the responses
			mu.Lock()
			defer mu.Unlock()
			responses = append(responses, response)
		}

		// start a bounded pool of workers so at most bufferSize trace
		// transaction requests are in flight at the same time; workers
		// terminate when the requests channel is closed and drained.
		for i := 0; i < bufferSize; i++ {
			go func() {
				for req := range requests {
					loadTraceByTxHash(d, req, cfg)
				}
			}()
		}

		// feed the workers, then close the channel so they terminate once
		// every receipt was processed (this also avoids leaking the workers
		// if the timeout below is reached).
		for _, receipt := range receipts {
			requests <- receipt
		}
		close(requests)

		// wait the traces to be loaded
		if waitTimeout(&wg, d.cfg.ReadTimeout.Duration) {
			return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("failed to get traces for batch %v: timeout reached", batchNumber), nil, true)
		}

		// since the txs are attached to a L2 Block and the L2 Block is
		// the struct attached to the Batch, in order to always respond
		// the traces in the same order, we need to order the transactions
		// first by block number and then by tx index, so we can have something
		// close to the txs being sorted by a tx index related to the batch
		sort.Slice(responses, func(i, j int) bool {
			if responses[i].blockNumber != responses[j].blockNumber {
				return responses[i].blockNumber < responses[j].blockNumber
			}
			return responses[i].txIndex < responses[j].txIndex
		})

		// build the batch trace response array
		traces := make([]traceBatchTransactionResponse, 0, len(receipts))
		for _, response := range responses {
			if response.err != nil {
				return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("failed to get traces for batch %v: failed to get trace for tx: %v, err: %v", batchNumber, response.txHash.String(), response.err.Error()), nil, true)
			}
			traces = append(traces, traceBatchTransactionResponse{
				TxHash: response.txHash,
				Result: response.trace,
			})
		}
		return traces, nil
	})
}
// buildTraceBlock traces every transaction of the given list and wraps each
// result in a traceBlockTransactionResponse, preserving the tx order.
func (d *DebugEndpoints) buildTraceBlock(ctx context.Context, txs []*ethTypes.Transaction, cfg *traceConfig, dbTx pgx.Tx) (interface{}, types.Error) {
	results := make([]traceBlockTransactionResponse, 0, len(txs))
	for _, tx := range txs {
		trace, err := d.buildTraceTransaction(ctx, tx.Hash(), cfg, dbTx)
		if err != nil {
			msg := fmt.Sprintf("failed to get trace for transaction %v: %v", tx.Hash().String(), err.Error())
			return RPCErrorResponse(types.DefaultErrorCode, msg, err, true)
		}
		results = append(results, traceBlockTransactionResponse{Result: trace})
	}
	return results, nil
}
// buildTraceTransaction traces the transaction identified by hash and builds
// the jsonrpc response for it. When cfg is nil, defaultTraceConfig is used.
// If a custom/built-in tracer is configured, the raw executor trace result is
// returned; otherwise a traceTransactionResponse with struct logs is built.
func (d *DebugEndpoints) buildTraceTransaction(ctx context.Context, hash common.Hash, cfg *traceConfig, dbTx pgx.Tx) (interface{}, types.Error) {
	traceCfg := cfg
	if traceCfg == nil {
		traceCfg = defaultTraceConfig
	}

	// check tracer: must be empty, a known built-in tracer, or a JS custom tracer
	if traceCfg.Tracer != nil && *traceCfg.Tracer != "" && !isBuiltInTracer(*traceCfg.Tracer) && !isJSCustomTracer(*traceCfg.Tracer) {
		return RPCErrorResponse(types.DefaultErrorCode, "invalid tracer", nil, false)
	}

	stateTraceConfig := state.TraceConfig{
		DisableStack:     traceCfg.DisableStack,
		DisableStorage:   traceCfg.DisableStorage,
		EnableMemory:     traceCfg.EnableMemory,
		EnableReturnData: traceCfg.EnableReturnData,
		Tracer:           traceCfg.Tracer,
		TracerConfig:     traceCfg.TracerConfig,
	}
	result, err := d.state.DebugTransaction(ctx, hash, stateTraceConfig, dbTx)
	if errors.Is(err, state.ErrNotFound) {
		return RPCErrorResponse(types.DefaultErrorCode, "transaction not found", nil, false)
	} else if err != nil {
		errorMessage := fmt.Sprintf("failed to get trace: %v", err.Error())
		return nil, types.NewRPCError(types.DefaultErrorCode, errorMessage)
	}

	// if a tracer was specified, then return the trace result
	if stateTraceConfig.Tracer != nil && *stateTraceConfig.Tracer != "" && len(result.ExecutorTraceResult) > 0 {
		return result.ExecutorTraceResult, nil
	}

	// the receipt provides the tx status used to fill the "failed" field
	receipt, err := d.state.GetTransactionReceipt(ctx, hash, dbTx)
	if err != nil {
		const errorMessage = "failed to get tx receipt"
		log.Errorf("%v: %v", errorMessage, err)
		return nil, types.NewRPCError(types.DefaultErrorCode, errorMessage)
	}

	failed := receipt.Status == ethTypes.ReceiptStatusFailed
	var returnValue interface{}
	if stateTraceConfig.EnableReturnData {
		returnValue = common.Bytes2Hex(result.ReturnValue)
	}

	structLogs := d.buildStructLogs(result.StructLogs, *traceCfg)

	resp := traceTransactionResponse{
		Gas:         result.GasUsed,
		Failed:      failed,
		ReturnValue: returnValue,
		StructLogs:  structLogs,
	}

	return resp, nil
}
// buildStructLogs converts the state struct logs into the jsonrpc
// StructLogRes representation, honoring the capture flags in cfg.
// Stack, memory and storage are only populated when their capture is enabled.
func (d *DebugEndpoints) buildStructLogs(stateStructLogs []instrumentation.StructLog, cfg traceConfig) []StructLogRes {
	structLogs := make([]StructLogRes, 0, len(stateStructLogs))
	memory := fakevm.NewMemory()
	for _, structLog := range stateStructLogs {
		errRes := ""
		if structLog.Err != nil {
			errRes = structLog.Err.Error()
		}

		op := structLog.Op
		if op == "SHA3" {
			// normalize the legacy opcode name to the current one
			op = "KECCAK256"
		} else if op == "STOP" && structLog.Pc == 0 {
			// this stop is generated for calls with single
			// step(no depth increase) and must be ignored
			continue
		}

		structLogRes := StructLogRes{
			Pc:            structLog.Pc,
			Op:            op,
			Gas:           structLog.Gas,
			GasCost:       structLog.GasCost,
			Depth:         structLog.Depth,
			Error:         errRes,
			RefundCounter: structLog.RefundCounter,
		}

		if !cfg.DisableStack {
			stack := make([]types.ArgBig, 0, len(structLog.Stack))
			for _, stackItem := range structLog.Stack {
				if stackItem != nil {
					stack = append(stack, types.ArgBig(*stackItem))
				}
			}
			structLogRes.Stack = &stack
		}

		if cfg.EnableMemory {
			memory.Resize(uint64(structLog.MemorySize))
			if len(structLog.Memory) > 0 {
				memory.Set(uint64(structLog.MemoryOffset), uint64(len(structLog.Memory)), structLog.Memory)
			}

			if structLog.MemorySize > 0 {
				// Populate the structLog memory
				structLog.Memory = memory.Data()

				// Convert memory to string array of 32-byte hex chunks
				const memoryChunkSize = 32
				memoryArray := make([]string, 0, len(structLog.Memory))

				for i := 0; i < len(structLog.Memory); i = i + memoryChunkSize {
					slice32Bytes := make([]byte, memoryChunkSize)
					end := i + memoryChunkSize
					if end > len(structLog.Memory) {
						// guard against a trailing chunk shorter than 32 bytes;
						// the remainder of slice32Bytes stays zero-padded
						end = len(structLog.Memory)
					}
					copy(slice32Bytes, structLog.Memory[i:end])
					memoryStringItem := hex.EncodeToString(slice32Bytes)
					memoryArray = append(memoryArray, memoryStringItem)
				}

				structLogRes.Memory = &memoryArray
			} else {
				// reset the scratch memory so stale data from a previous
				// step does not leak into later entries
				memory = fakevm.NewMemory()
				structLogRes.Memory = &[]string{}
			}
		}

		if !cfg.DisableStorage && len(structLog.Storage) > 0 {
			storage := make(map[string]string, len(structLog.Storage))
			for storageKey, storageValue := range structLog.Storage {
				k := hex.EncodeToString(storageKey.Bytes())
				v := hex.EncodeToString(storageValue.Bytes())
				storage[k] = v
			}
			structLogRes.Storage = &storage
		}

		structLogs = append(structLogs, structLogRes)
	}
	return structLogs
}
// isBuiltInTracer checks if the tracer is one of the
// built-in tracers
func isBuiltInTracer(tracer string) bool {
	builtIn := []string{"callTracer", "4byteTracer", "prestateTracer", "noopTracer"}
	for _, name := range builtIn {
		if name == tracer {
			return true
		}
	}
	return false
}
// isJSCustomTracer checks if the tracer contains the
// functions result and fault which are required for a custom tracer
// https://geth.ethereum.org/docs/developers/evm-tracing/custom-tracer
func isJSCustomTracer(tracer string) bool {
	hasResult := strings.Contains(tracer, "result")
	hasFault := strings.Contains(tracer, "fault")
	return hasResult && hasFault
}
// waitTimeout waits for the waitGroup for the specified max timeout.
// Returns true if waiting timed out.
func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
c := make(chan struct{})
go func() {
defer close(c)
wg.Wait()
}()
select {
case <-c:
return false // completed normally
case <-time.After(timeout):
return true // timed out
}
}