from __future__ import annotations

import asyncio
import contextlib
import copy
import dataclasses
import logging
import multiprocessing
import random
import sqlite3
import time
import traceback
from collections.abc import AsyncIterator, Awaitable, Sequence
from multiprocessing.context import BaseContext
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, ClassVar, Optional, TextIO, Union, cast, final

from chia_rs import (
    AugSchemeMPL,
    BLSCache,
    get_flags_for_height_and_constants,
    run_block_generator,
    run_block_generator2,
)
from packaging.version import Version

from chia.consensus.block_body_validation import ForkInfo
from chia.consensus.block_creation import unfinished_block_to_full_block
from chia.consensus.block_record import BlockRecord
from chia.consensus.blockchain import AddBlockResult, Blockchain, BlockchainMutexPriority, StateChangeSummary
from chia.consensus.blockchain_interface import BlockchainInterface
from chia.consensus.constants import ConsensusConstants
from chia.consensus.cost_calculator import NPCResult
from chia.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from chia.consensus.make_sub_epoch_summary import next_sub_epoch_summary
from chia.consensus.multiprocess_validation import PreValidationResult, pre_validate_blocks_multiprocessing
from chia.consensus.pot_iterations import calculate_sp_iters
from chia.full_node.block_store import BlockStore
from chia.full_node.coin_store import CoinStore
from chia.full_node.full_node_api import FullNodeAPI
from chia.full_node.full_node_store import FullNodeStore, FullNodeStorePeakResult, UnfinishedBlockEntry
from chia.full_node.hint_management import get_hints_and_subscription_coin_ids
from chia.full_node.hint_store import HintStore
from chia.full_node.mempool import MempoolRemoveInfo
from chia.full_node.mempool_manager import MempoolManager, NewPeakItem
from chia.full_node.signage_point import SignagePoint
from chia.full_node.subscriptions import PeerSubscriptions, peers_for_spend_bundle
from chia.full_node.sync_store import Peak, SyncStore
from chia.full_node.tx_processing_queue import TransactionQueue
from chia.full_node.weight_proof import WeightProofHandler
from chia.protocols import farmer_protocol, full_node_protocol, timelord_protocol, wallet_protocol
from chia.protocols.farmer_protocol import SignagePointSourceData, SPSubSlotSourceData, SPVDFSourceData
from chia.protocols.full_node_protocol import RequestBlocks, RespondBlock, RespondBlocks, RespondSignagePoint
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.protocols.shared_protocol import Capability
from chia.protocols.wallet_protocol import CoinState, CoinStateUpdate, RemovedMempoolItem
from chia.rpc.rpc_server import StateChangedProtocol
from chia.server.node_discovery import FullNodePeers
from chia.server.outbound_message import Message, NodeType, make_msg
from chia.server.server import ChiaServer
from chia.server.ws_connection import WSChiaConnection
from chia.types.blockchain_format.classgroup import ClassgroupElement
from chia.types.blockchain_format.pool_target import PoolTarget
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from chia.types.blockchain_format.vdf import CompressibleVDFField, VDFInfo, VDFProof, validate_vdf
from chia.types.coin_record import CoinRecord
from chia.types.end_of_slot_bundle import EndOfSubSlotBundle
from chia.types.full_block import FullBlock
from chia.types.header_block import HeaderBlock
from chia.types.mempool_inclusion_status import MempoolInclusionStatus
from chia.types.mempool_item import MempoolItem
from chia.types.peer_info import PeerInfo
from chia.types.spend_bundle import SpendBundle
from chia.types.transaction_queue_entry import TransactionQueueEntry
from chia.types.unfinished_block import UnfinishedBlock
from chia.types.validation_state import ValidationState
from chia.types.weight_proof import WeightProof
from chia.util.augmented_chain import AugmentedBlockchain
from chia.util.bech32m import encode_puzzle_hash
from chia.util.check_fork_next_block import check_fork_next_block
from chia.util.config import process_config_start_method
from chia.util.db_synchronous import db_synchronous_on
from chia.util.db_version import lookup_db_version, set_db_version_async
from chia.util.db_wrapper import DBWrapper2, manage_connection
from chia.util.errors import ConsensusError, Err, TimestampError, ValidationError
from chia.util.ints import uint8, uint32, uint64, uint128
from chia.util.limited_semaphore import LimitedSemaphore
from chia.util.path import path_from_root
from chia.util.profiler import enable_profiler, mem_profile_task, profile_task
from chia.util.safe_cancel_task import cancel_task_safe


# This is the result of calling peak_post_processing, which is then fed into peak_post_processing_2
@dataclasses.dataclass
class PeakPostProcessingResult:
    mempool_peak_result: list[NewPeakItem]  # The new items from calling MempoolManager.new_peak
    mempool_removals: list[MempoolRemoveInfo]  # The removed mempool items from calling MempoolManager.new_peak
    fns_peak_result: FullNodeStorePeakResult  # The result of calling FullNodeStore.new_peak
    hints: list[tuple[bytes32, bytes]]  # The hints added to the DB
    lookup_coin_ids: list[bytes32]  # The coin IDs that we need to look up to notify wallets of changes


@dataclasses.dataclass(frozen=True)
class WalletUpdate:
    fork_height: uint32
    peak: Peak
    coin_records: list[CoinRecord]
    hints: dict[bytes32, bytes32]


@final
@dataclasses.dataclass
class FullNode:
    if TYPE_CHECKING:
        from chia.rpc.rpc_server import RpcServiceProtocol

        _protocol_check: ClassVar[RpcServiceProtocol] = cast("FullNode", None)

    root_path: Path
    config: dict[str, Any]
    constants: ConsensusConstants
    signage_point_times: list[float]
    full_node_store: FullNodeStore
    log: logging.Logger
    db_path: Path
    wallet_sync_queue: asyncio.Queue[WalletUpdate]
    _segment_task: Optional[asyncio.Task[None]] = None
    initialized: bool = False
    _server: Optional[ChiaServer] = None
    _shut_down: bool = False
    pow_creation: dict[bytes32, asyncio.Event] = dataclasses.field(default_factory=dict)
    state_changed_callback: Optional[StateChangedProtocol] = None
    full_node_peers: Optional[FullNodePeers] = None
    sync_store: SyncStore = dataclasses.field(default_factory=SyncStore)
    uncompact_task: Optional[asyncio.Task[None]] = None
    compact_vdf_requests: set[bytes32] = dataclasses.field(default_factory=set)
    # TODO: Logging isn't set up yet, so the log entries related to parsing the
    # config would end up on stdout if handled here.
    multiprocessing_context: Optional[BaseContext] = None
    _ui_tasks: set[asyncio.Task[None]] = dataclasses.field(default_factory=set)
    subscriptions: PeerSubscriptions = dataclasses.field(default_factory=PeerSubscriptions)
    _transaction_queue_task: Optional[asyncio.Task[None]] = None
    simulator_transaction_callback: Optional[Callable[[bytes32], Awaitable[None]]] = None
    _sync_task_list: list[asyncio.Task[None]] = dataclasses.field(default_factory=list)
    _transaction_queue: Optional[TransactionQueue] = None
    _compact_vdf_sem: Optional[LimitedSemaphore] = None
    _new_peak_sem: Optional[LimitedSemaphore] = None
    _add_transaction_semaphore: Optional[asyncio.Semaphore] = None
    _db_wrapper: Optional[DBWrapper2] = None
    _hint_store: Optional[HintStore] = None
    _block_store: Optional[BlockStore] = None
    _coin_store: Optional[CoinStore] = None
    _mempool_manager: Optional[MempoolManager] = None
    _init_weight_proof: Optional[asyncio.Task[None]] = None
    _blockchain: Optional[Blockchain] = None
    _timelord_lock: Optional[asyncio.Lock] = None
    weight_proof_handler: Optional[WeightProofHandler] = None
    # hashes of peaks that failed long sync on CHIP-13 validation
    bad_peak_cache: dict[bytes32, uint32] = dataclasses.field(default_factory=dict)
    wallet_sync_task: Optional[asyncio.Task[None]] = None
    _bls_cache: BLSCache = dataclasses.field(default_factory=lambda: BLSCache(50000))

    @property
    def server(self) -> ChiaServer:
        # This is a stop gap until the class usage is refactored such that the values of
        # integral attributes are known at creation of the instance.
        if self._server is None:
            raise RuntimeError("server not assigned")
        return self._server

    @classmethod
    async def create(
        cls,
        config: dict[str, Any],
        root_path: Path,
        consensus_constants: ConsensusConstants,
        name: str = __name__,
    ) -> FullNode:
        # NOTE: async to force the queue creation to occur when an event loop is available
        db_path_replaced: str = config["database_path"].replace("CHALLENGE", config["selected_network"])
        db_path = path_from_root(root_path, db_path_replaced)
        db_path.parent.mkdir(parents=True, exist_ok=True)

        return cls(
            root_path=root_path,
            config=config,
            constants=consensus_constants,
            signage_point_times=[time.time() for _ in range(consensus_constants.NUM_SPS_SUB_SLOT)],
            full_node_store=FullNodeStore(consensus_constants),
            log=logging.getLogger(name),
            db_path=db_path,
            wallet_sync_queue=asyncio.Queue(),
        )
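
    # A rough usage sketch (illustrative only, not part of this module): callers
    # construct the node with create() and then enter manage() to bring up its
    # stores and background tasks, e.g.:
    #
    #     node = await FullNode.create(config, root_path, constants)
    #     async with node.manage():
    #         ...  # stores, mempool and sync tasks are live here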

    @contextlib.asynccontextmanager
    async def manage(self) -> AsyncIterator[None]:
        self._timelord_lock = asyncio.Lock()
        self._compact_vdf_sem = LimitedSemaphore.create(active_limit=4, waiting_limit=20)

        # We don't want to run too many concurrent new_peak instances, because it would fetch the same block from
        # multiple peers and re-validate.
        self._new_peak_sem = LimitedSemaphore.create(active_limit=2, waiting_limit=20)

        # This many respond_transaction tasks can be active at any point in time
        self._add_transaction_semaphore = asyncio.Semaphore(200)

        sql_log_path: Optional[Path] = None
        with contextlib.ExitStack() as exit_stack:
            sql_log_file: Optional[TextIO] = None
            if self.config.get("log_sqlite_cmds", False):
                sql_log_path = path_from_root(self.root_path, "log/sql.log")
                self.log.info(f"logging SQL commands to {sql_log_path}")
                sql_log_file = exit_stack.enter_context(sql_log_path.open("a", encoding="utf-8"))

            # create the store (db) and full node instance
            # TODO: is this standardized and thus able to be handled by DBWrapper2?
            async with manage_connection(self.db_path, log_file=sql_log_file, name="version_check") as db_connection:
                db_version = await lookup_db_version(db_connection)
            self.log.info(f"using blockchain database {self.db_path}, which is version {db_version}")

            db_sync = db_synchronous_on(self.config.get("db_sync", "auto"))
            self.log.info(f"opening blockchain DB: synchronous={db_sync}")

            async with DBWrapper2.managed(
                self.db_path,
                db_version=db_version,
                reader_count=self.config.get("db_readers", 4),
                log_path=sql_log_path,
                synchronous=db_sync,
            ) as self._db_wrapper:
                if self.db_wrapper.db_version != 2:
                    async with self.db_wrapper.reader_no_transaction() as conn:
                        async with conn.execute(
                            "SELECT name FROM sqlite_master WHERE type='table' AND name='full_blocks'"
                        ) as cur:
                            if len(list(await cur.fetchall())) == 0:
                                try:
                                    # this is a new DB file. Make it v2
                                    async with self.db_wrapper.writer_maybe_transaction() as w_conn:
                                        await set_db_version_async(w_conn, 2)
                                        self.db_wrapper.db_version = 2
                                        self.log.info("blockchain database is empty, configuring as v2")
                                except sqlite3.OperationalError:
                                    # it could be a database created with "chia init", which is
                                    # empty except it has the database_version table
                                    pass

                self._block_store = await BlockStore.create(self.db_wrapper)
                self._hint_store = await HintStore.create(self.db_wrapper)
                self._coin_store = await CoinStore.create(self.db_wrapper)

                self.log.info("Initializing blockchain from disk")
                start_time = time.monotonic()
                reserved_cores = self.config.get("reserved_cores", 0)
                single_threaded = self.config.get("single_threaded", False)
                multiprocessing_start_method = process_config_start_method(config=self.config, log=self.log)
                self.multiprocessing_context = multiprocessing.get_context(method=multiprocessing_start_method)
                self._blockchain = await Blockchain.create(
                    coin_store=self.coin_store,
                    block_store=self.block_store,
                    consensus_constants=self.constants,
                    blockchain_dir=self.db_path.parent,
                    reserved_cores=reserved_cores,
                    single_threaded=single_threaded,
                )

                self._mempool_manager = MempoolManager(
                    get_coin_records=self.coin_store.get_coin_records,
                    consensus_constants=self.constants,
                    single_threaded=single_threaded,
                )

                # Transactions go into this queue from the server, and get sent to respond_transaction
                self._transaction_queue = TransactionQueue(1000, self.log)
                self._transaction_queue_task: asyncio.Task[None] = asyncio.create_task(self._handle_transactions())
                self._init_weight_proof = asyncio.create_task(self.initialize_weight_proof())

                if self.config.get("enable_profiler", False):
                    asyncio.create_task(profile_task(self.root_path, "node", self.log))

                self.profile_block_validation = self.config.get("profile_block_validation", False)
                if self.profile_block_validation:  # pragma: no cover
                    # this is not covered by any unit tests as it's essentially test code
                    # itself. It's exercised manually when investigating performance issues
                    profile_dir = path_from_root(self.root_path, "block-validation-profile")
                    profile_dir.mkdir(parents=True, exist_ok=True)

                if self.config.get("enable_memory_profiler", False):
                    asyncio.create_task(mem_profile_task(self.root_path, "node", self.log))

                time_taken = time.monotonic() - start_time
                peak: Optional[BlockRecord] = self.blockchain.get_peak()
                if peak is None:
                    self.log.info(f"Initialized with empty blockchain time taken: {int(time_taken)}s")
                    num_unspent = await self.coin_store.num_unspent()
                    if num_unspent > 0:
                        self.log.error(
                            f"Inconsistent blockchain DB file! Could not find peak block but found {num_unspent} coins! "
                            "This is a fatal error. The blockchain database may be corrupt"
                        )
                        raise RuntimeError("corrupt blockchain DB")
                else:
                    self.log.info(
                        f"Blockchain initialized to peak {peak.header_hash} height"
                        f" {peak.height}, "
                        f"time taken: {int(time_taken)}s"
                    )
                    async with self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.high):
                        pending_tx = await self.mempool_manager.new_peak(self.blockchain.get_tx_peak(), None)
                        assert len(pending_tx.items) == 0  # no pending transactions when starting up

                    full_peak: Optional[FullBlock] = await self.blockchain.get_full_peak()
                    assert full_peak is not None
                    state_change_summary = StateChangeSummary(peak, uint32(max(peak.height - 1, 0)), [], [], [], [])
                    ppp_result: PeakPostProcessingResult = await self.peak_post_processing(
                        full_peak, state_change_summary, None
                    )
                    await self.peak_post_processing_2(full_peak, None, state_change_summary, ppp_result)

                if self.config["send_uncompact_interval"] != 0:
                    sanitize_weight_proof_only = False
                    if "sanitize_weight_proof_only" in self.config:
                        sanitize_weight_proof_only = self.config["sanitize_weight_proof_only"]
                    assert self.config["target_uncompact_proofs"] != 0
                    self.uncompact_task = asyncio.create_task(
                        self.broadcast_uncompact_blocks(
                            self.config["send_uncompact_interval"],
                            self.config["target_uncompact_proofs"],
                            sanitize_weight_proof_only,
                        )
                    )
                if self.wallet_sync_task is None or self.wallet_sync_task.done():
                    self.wallet_sync_task = asyncio.create_task(self._wallets_sync_task_handler())

                self.initialized = True
                if self.full_node_peers is not None:
                    asyncio.create_task(self.full_node_peers.start())

                try:
                    yield
                finally:
                    self._shut_down = True
                    if self._init_weight_proof is not None:
                        self._init_weight_proof.cancel()

                    # blockchain is created in _start and in certain cases it may not exist here during _close
                    if self._blockchain is not None:
                        self.blockchain.shut_down()
                    # same for mempool_manager
                    if self._mempool_manager is not None:
                        self.mempool_manager.shut_down()
                    if self.full_node_peers is not None:
                        asyncio.create_task(self.full_node_peers.close())
                    if self.uncompact_task is not None:
                        self.uncompact_task.cancel()
                    if self._transaction_queue_task is not None:
                        self._transaction_queue_task.cancel()
                    cancel_task_safe(task=self.wallet_sync_task, log=self.log)
                    for one_sync_task in self._sync_task_list:
                        if not one_sync_task.done():
                            cancel_task_safe(task=one_sync_task, log=self.log)
                    for task in list(self.full_node_store.tx_fetch_tasks.values()):
                        cancel_task_safe(task, self.log)
                    if self._init_weight_proof is not None:
                        await asyncio.wait([self._init_weight_proof])
                    for one_sync_task in self._sync_task_list:
                        if one_sync_task.done():
                            self.log.info(f"Long sync task {one_sync_task.get_name()} done")
                        else:
                            with contextlib.suppress(asyncio.CancelledError):
                                self.log.info(f"Awaiting long sync task {one_sync_task.get_name()}")
                                await one_sync_task
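
    # Note on the shutdown path above: background tasks are cancelled first and
    # only awaited afterwards (e.g. _init_weight_proof and the long sync tasks),
    # so manage() does not exit while a cancelled task is still unwinding.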

    @property
    def block_store(self) -> BlockStore:
        assert self._block_store is not None
        return self._block_store

    @property
    def timelord_lock(self) -> asyncio.Lock:
        assert self._timelord_lock is not None
        return self._timelord_lock

    @property
    def mempool_manager(self) -> MempoolManager:
        assert self._mempool_manager is not None
        return self._mempool_manager

    @property
    def blockchain(self) -> Blockchain:
        assert self._blockchain is not None
        return self._blockchain

    @property
    def coin_store(self) -> CoinStore:
        assert self._coin_store is not None
        return self._coin_store

    @property
    def add_transaction_semaphore(self) -> asyncio.Semaphore:
        assert self._add_transaction_semaphore is not None
        return self._add_transaction_semaphore

    @property
    def transaction_queue(self) -> TransactionQueue:
        assert self._transaction_queue is not None
        return self._transaction_queue

    @property
    def db_wrapper(self) -> DBWrapper2:
        assert self._db_wrapper is not None
        return self._db_wrapper

    @property
    def hint_store(self) -> HintStore:
        assert self._hint_store is not None
        return self._hint_store

    @property
    def new_peak_sem(self) -> LimitedSemaphore:
        assert self._new_peak_sem is not None
        return self._new_peak_sem

    @property
    def compact_vdf_sem(self) -> LimitedSemaphore:
        assert self._compact_vdf_sem is not None
        return self._compact_vdf_sem
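
    # The properties above share one pattern: the underlying attribute is Optional
    # because it is only assigned inside manage(), and each property asserts it is
    # set, so callers get a non-Optional value once the node is running.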

    def get_connections(self, request_node_type: Optional[NodeType]) -> list[dict[str, Any]]:
        connections = self.server.get_connections(request_node_type)
        con_info: list[dict[str, Any]] = []
        if self.sync_store is not None:
            peak_store = self.sync_store.peer_to_peak
        else:
            peak_store = None
        for con in connections:
            if peak_store is not None and con.peer_node_id in peak_store:
                peak = peak_store[con.peer_node_id]
                peak_height = peak.height
                peak_hash = peak.header_hash
                peak_weight = peak.weight
            else:
                peak_height = None
                peak_hash = None
                peak_weight = None
            con_dict: dict[str, Any] = {
                "type": con.connection_type,
                "local_port": con.local_port,
                "peer_host": con.peer_info.host,
                "peer_port": con.peer_info.port,
                "peer_server_port": con.peer_server_port,
                "node_id": con.peer_node_id,
                "creation_time": con.creation_time,
                "bytes_read": con.bytes_read,
                "bytes_written": con.bytes_written,
                "last_message_time": con.last_message_time,
                "peak_height": peak_height,
                "peak_weight": peak_weight,
                "peak_hash": peak_hash,
            }
            con_info.append(con_dict)

        return con_info

    def _set_state_changed_callback(self, callback: StateChangedProtocol) -> None:
        self.state_changed_callback = callback

    async def _handle_one_transaction(self, entry: TransactionQueueEntry) -> None:
        peer = entry.peer
        try:
            inc_status, err = await self.add_transaction(entry.transaction, entry.spend_name, peer, entry.test)
            entry.done.set((inc_status, err))
        except asyncio.CancelledError:
            error_stack = traceback.format_exc()
            self.log.debug(f"Cancelling _handle_one_transaction, closing: {error_stack}")
        except Exception:
            error_stack = traceback.format_exc()
            self.log.error(f"Error in _handle_one_transaction, closing: {error_stack}")
            if peer is not None:
                await peer.close()
        finally:
            self.add_transaction_semaphore.release()

    async def _handle_transactions(self) -> None:
        while not self._shut_down:
            # We use a semaphore to make sure we don't send more than 200 concurrent calls of respond_transaction.
            # However, doing them one at a time would be slow, because they get sent to other processes.
            await self.add_transaction_semaphore.acquire()
            item: TransactionQueueEntry = await self.transaction_queue.pop()
            asyncio.create_task(self._handle_one_transaction(item))
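
    # Concurrency note: _handle_transactions acquires the semaphore before popping
    # an entry, and _handle_one_transaction releases it in its `finally` block, so
    # at most 200 transactions (the limit set in manage()) are in flight at once.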

    async def initialize_weight_proof(self) -> None:
        self.weight_proof_handler = WeightProofHandler(
            constants=self.constants,
            blockchain=self.blockchain,
            multiprocessing_context=self.multiprocessing_context,
        )
        peak = self.blockchain.get_peak()
        if peak is not None:
            await self.weight_proof_handler.create_sub_epoch_segments()

    def set_server(self, server: ChiaServer) -> None:
        self._server = server
        dns_servers: list[str] = []
        network_name = self.config["selected_network"]
        try:
            default_port = self.config["network_overrides"]["config"][network_name]["default_full_node_port"]
        except Exception:
            self.log.info("Default port field not found in config.")
            default_port = None
        if "dns_servers" in self.config:
            dns_servers = self.config["dns_servers"]
        elif network_name == "mainnet":
            # If `dns_servers` is missing from the `config`, hardcode it if we're running mainnet.
            dns_servers.append("dns-introducer.chia.net")
        try:
            self.full_node_peers = FullNodePeers(
                self.server,
                self.config["target_outbound_peer_count"],
                self.root_path / Path(self.config.get("peers_file_path", "db/peers.dat")),
                self.config["introducer_peer"],
                dns_servers,
                self.config["peer_connect_interval"],
                self.config["selected_network"],
                default_port,
                self.log,
            )
        except Exception as e:
            error_stack = traceback.format_exc()
            self.log.error(f"Exception in peer discovery: {e}")
            self.log.error(f"Exception Stack: {error_stack}")

    def _state_changed(self, change: str, change_data: Optional[dict[str, Any]] = None) -> None:
        if self.state_changed_callback is not None:
            self.state_changed_callback(change, change_data)

    async def short_sync_batch(self, peer: WSChiaConnection, start_height: uint32, target_height: uint32) -> bool:
        """
        Tries to sync to a chain which is not too far in the future, by downloading batches of blocks. If the first
        block that we download is not connected to our chain, we return False and do an expensive long sync instead.
        Long sync is not preferred because it requires downloading and validating a weight proof.

        Args:
            peer: peer to sync from
            start_height: height that we should start downloading at. (Our peak is higher)
            target_height: target to sync to

        Returns:
            False if the fork point was not found, and we need to do a long sync. True otherwise.
        """
        # Don't trigger multiple batch syncs to the same peer
        if self.sync_store.is_backtrack_syncing(node_id=peer.peer_node_id):
            return True  # Don't batch sync, we are already in progress of a backtrack sync
        if peer.peer_node_id in self.sync_store.batch_syncing:
            return True  # Don't trigger a long sync
        self.sync_store.batch_syncing.add(peer.peer_node_id)

        self.log.info(f"Starting batch short sync from {start_height} to height {target_height}")
        if start_height > 0:
            first = await peer.call_api(
                FullNodeAPI.request_block, full_node_protocol.RequestBlock(uint32(start_height), False)
            )
            if first is None or not isinstance(first, full_node_protocol.RespondBlock):
                self.sync_store.batch_syncing.remove(peer.peer_node_id)
                self.log.error(f"Error short batch syncing, could not fetch block at height {start_height}")
                return False
            if not self.blockchain.contains_block(first.block.prev_header_hash):
                self.log.info("Batch syncing stopped, this is a deep chain")
                self.sync_store.batch_syncing.remove(peer.peer_node_id)
                # First sb not connected to our blockchain, do a long sync instead
                return False

        batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS
        if self._segment_task is not None and (not self._segment_task.done()):
            try:
                self._segment_task.cancel()
            except Exception as e:
                self.log.warning(f"failed to cancel segment task {e}")
            self._segment_task = None

        try:
            peer_info = peer.get_peer_logging()
            if start_height > 0:
                fork_hash = self.blockchain.height_to_hash(uint32(start_height - 1))
            else:
                fork_hash = self.constants.GENESIS_CHALLENGE
            assert fork_hash
            fork_info = ForkInfo(start_height - 1, start_height - 1, fork_hash)
            for height in range(start_height, target_height, batch_size):
                end_height = min(target_height, height + batch_size)
                request = RequestBlocks(uint32(height), uint32(end_height), True)
                response = await peer.call_api(FullNodeAPI.request_blocks, request)
                if not response:
                    raise ValueError(f"Error short batch syncing, invalid/no response for {height}-{end_height}")
                async with self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.high):
                    state_change_summary: Optional[StateChangeSummary]
                    prev_b = None
                    if response.blocks[0].height > 0:
                        prev_b = await self.blockchain.get_block_record_from_db(response.blocks[0].prev_header_hash)
                        assert prev_b is not None
                    new_slot = len(response.blocks[0].finished_sub_slots) > 0
                    ssi, diff = get_next_sub_slot_iters_and_difficulty(
                        self.constants, new_slot, prev_b, self.blockchain
                    )
                    vs = ValidationState(ssi, diff, None)
                    success, state_change_summary, err = await self.add_block_batch(
                        AugmentedBlockchain(self.blockchain), response.blocks, peer_info, fork_info, vs
                    )
                    if not success:
                        raise ValueError(f"Error short batch syncing, failed to validate blocks {height}-{end_height}")
                if state_change_summary is not None:
                    try:
                        peak_fb: Optional[FullBlock] = await self.blockchain.get_full_peak()
                        assert peak_fb is not None
                        ppp_result: PeakPostProcessingResult = await self.peak_post_processing(
                            peak_fb,
                            state_change_summary,
                            peer,
                        )
                        await self.peak_post_processing_2(peak_fb, peer, state_change_summary, ppp_result)
                    except Exception:
                        # Still do post processing after cancel (or exception)
                        peak_fb = await self.blockchain.get_full_peak()
                        assert peak_fb is not None
                        await self.peak_post_processing(peak_fb, state_change_summary, peer)
                        raise
                    finally:
                        self.log.info(f"Added blocks {height}-{end_height}")
        except (asyncio.CancelledError, Exception):
            self.sync_store.batch_syncing.remove(peer.peer_node_id)
            raise
        self.sync_store.batch_syncing.remove(peer.peer_node_id)
        return True
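
    # Note: the batch loop above builds one ForkInfo anchored just below
    # start_height and reuses it for every batch, so add_block_batch validates all
    # batches against a single, stable fork point.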

    async def short_sync_backtrack(
        self, peer: WSChiaConnection, peak_height: uint32, target_height: uint32, target_unf_hash: bytes32
    ) -> bool:
        """
        Performs a backtrack sync, where blocks are downloaded one at a time from newest to oldest. If we do not
        find the fork point 5 deeper than our peak, we return False and do a long sync instead.

        Args:
            peer: peer to sync from
            peak_height: height of our peak
            target_height: target height
            target_unf_hash: partial hash of the unfinished block of the target

        Returns:
            True iff we found the fork point, and we do not need to long sync.
        """
        try:
            self.sync_store.increment_backtrack_syncing(node_id=peer.peer_node_id)

            unfinished_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(target_unf_hash)
            curr_height: int = target_height
            found_fork_point = False
            blocks = []
            while curr_height > peak_height - 5:
                # If we already have the unfinished block, don't fetch the transactions. In the normal case, we will
                # already have the unfinished block, from when it was broadcast, so we just need to download the
                # header, but not the transactions
                fetch_tx: bool = unfinished_block is None or curr_height != target_height
                curr = await peer.call_api(
                    FullNodeAPI.request_block, full_node_protocol.RequestBlock(uint32(curr_height), fetch_tx)
                )
                if curr is None:
                    raise ValueError(f"Failed to fetch block {curr_height} from {peer.get_peer_logging()}, timed out")
                if not isinstance(curr, full_node_protocol.RespondBlock):
                    raise ValueError(
                        f"Failed to fetch block {curr_height} from {peer.get_peer_logging()}, wrong type {type(curr)}"
                    )
                blocks.append(curr.block)
                if self.blockchain.contains_block(curr.block.prev_header_hash) or curr_height == 0:
                    found_fork_point = True
                    break
                curr_height -= 1
            if found_fork_point:
                for block in reversed(blocks):
                    # when syncing, we won't share any signatures with the
                    # mempool, so there's no need to pass in the BLS cache.
                    await self.add_block(block, peer)
        except (asyncio.CancelledError, Exception):
            self.sync_store.decrement_backtrack_syncing(node_id=peer.peer_node_id)
            raise
        self.sync_store.decrement_backtrack_syncing(node_id=peer.peer_node_id)
        return found_fork_point
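
    # Backtrack sync only walks at most 5 blocks below our peak (the
    # `curr_height > peak_height - 5` bound above); deeper forks fall through to
    # batch sync or a full long sync, which new_peak() below chooses between.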

    async def _refresh_ui_connections(self, sleep_before: float = 0) -> None:
        if sleep_before > 0:
            await asyncio.sleep(sleep_before)
        self._state_changed("peer_changed_peak")

    async def new_peak(self, request: full_node_protocol.NewPeak, peer: WSChiaConnection) -> None:
        """
        We have received a notification of a new peak from a peer. This happens either when we have just connected,
        or when the peer has updated their peak.

        Args:
            request: information about the new peak
            peer: peer that sent the message
        """
        try:
            seen_header_hash = self.sync_store.seen_header_hash(request.header_hash)
            # Updates heights in the UI. Sleeps 1.5s before, so other peers have time to update their peaks as well.
            # Limit to 3 refreshes.
            if not seen_header_hash and len(self._ui_tasks) < 3:
                self._ui_tasks.add(asyncio.create_task(self._refresh_ui_connections(1.5)))
            # Prune completed connect tasks
            self._ui_tasks = set(filter(lambda t: not t.done(), self._ui_tasks))
        except Exception as e:
            self.log.warning(f"Exception UI refresh task: {e}")

        # Store this peak/peer combination in case we want to sync to it, and to keep track of peers
        self.sync_store.peer_has_block(request.header_hash, peer.peer_node_id, request.weight, request.height, True)

        if self.blockchain.contains_block(request.header_hash):
            return None

        # Not interested in less heavy peaks
        peak: Optional[BlockRecord] = self.blockchain.get_peak()
        curr_peak_height = uint32(0) if peak is None else peak.height
        if peak is not None and peak.weight > request.weight:
            return None

        if self.sync_store.get_sync_mode():
            # If peer connects while we are syncing, check if they have the block we are syncing towards
            target_peak = self.sync_store.target_peak
            if target_peak is not None and request.header_hash != target_peak.header_hash:
                peak_peers: set[bytes32] = self.sync_store.get_peers_that_have_peak([target_peak.header_hash])
                # Don't ask if we already know this peer has the peak
                if peer.peer_node_id not in peak_peers:
                    target_peak_response: Optional[RespondBlock] = await peer.call_api(
                        FullNodeAPI.request_block,
                        full_node_protocol.RequestBlock(target_peak.height, False),
                        timeout=10,
                    )
                    if target_peak_response is not None and isinstance(target_peak_response, RespondBlock):
                        self.sync_store.peer_has_block(
                            target_peak.header_hash,
                            peer.peer_node_id,
                            target_peak_response.block.weight,
                            target_peak.height,
                            False,
                        )
        else:
            if (
                curr_peak_height <= request.height
                and request.height <= curr_peak_height + self.config["short_sync_blocks_behind_threshold"]
            ):
                # This is the normal case of receiving the next block
                if await self.short_sync_backtrack(
                    peer, curr_peak_height, request.height, request.unfinished_reward_block_hash
                ):
                    return None

            if request.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS:
                # This is the case of syncing up more than a few blocks, at the start of the chain
                self.log.debug("Doing batch sync, no backup")
                await self.short_sync_batch(peer, uint32(0), request.height)
                return None

            if (
                curr_peak_height <= request.height
                and request.height < curr_peak_height + self.config["sync_blocks_behind_threshold"]
            ):
                # This is the case of being behind, but not by too much
                if await self.short_sync_batch(peer, uint32(max(curr_peak_height - 6, 0)), request.height):
                    return None

            # Clean up task reference list (used to prevent gc from killing running tasks)
            for oldtask in self._sync_task_list[:]:
                if oldtask.done():
                    self._sync_task_list.remove(oldtask)

            # This is either the case where we were not able to sync successfully (for example, due to the fork
            # point being in the past), or we are very far behind. Performs a long sync.
            # Multiple tasks may be created here. If we don't save all handles, a task could enter a sync object
            # and be cleaned up by the GC, corrupting the sync object and possibly not allowing anything else in.
            self._sync_task_list.append(asyncio.create_task(self._sync()))
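
    # Rough decision ladder used by new_peak() above:
    #   1. request within short_sync_blocks_behind_threshold -> short_sync_backtrack
    #   2. request below WEIGHT_PROOF_RECENT_BLOCKS          -> short_sync_batch from genesis
    #   3. request within sync_blocks_behind_threshold       -> short_sync_batch from ~6 blocks back
    #   4. otherwise                                         -> long sync via _sync()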

    async def send_peak_to_timelords(
        self, peak_block: Optional[FullBlock] = None, peer: Optional[WSChiaConnection] = None
    ) -> None:
        """
        Sends current peak to timelords
        """
        if peak_block is None:
            peak_block = await self.blockchain.get_full_peak()
        if peak_block is not None:
            peak = self.blockchain.block_record(peak_block.header_hash)
            difficulty = self.blockchain.get_next_difficulty(peak.header_hash, False)
            ses: Optional[SubEpochSummary] = next_sub_epoch_summary(
                self.constants,
                self.blockchain,
                peak.required_iters,
                peak_block,
                True,
            )
            recent_rc = self.blockchain.get_recent_reward_challenges()

            curr = peak
            while not curr.is_challenge_block(self.constants) and not curr.first_in_sub_slot:
                curr = self.blockchain.block_record(curr.prev_hash)

            if curr.is_challenge_block(self.constants):
                last_csb_or_eos = curr.total_iters
            else:
                last_csb_or_eos = curr.ip_sub_slot_total_iters(self.constants)

            curr = peak
            passed_ses_height_but_not_yet_included = True
            while (curr.height % self.constants.SUB_EPOCH_BLOCKS) != 0:
                if curr.sub_epoch_summary_included:
                    passed_ses_height_but_not_yet_included = False
                curr = self.blockchain.block_record(curr.prev_hash)
            if curr.sub_epoch_summary_included or curr.height == 0:
                passed_ses_height_but_not_yet_included = False

            timelord_new_peak: timelord_protocol.NewPeakTimelord = timelord_protocol.NewPeakTimelord(
                peak_block.reward_chain_block,
                difficulty,
                peak.deficit,
                peak.sub_slot_iters,
                ses,
                recent_rc,
                last_csb_or_eos,
                passed_ses_height_but_not_yet_included,
            )
            msg = make_msg(ProtocolMessageTypes.new_peak_timelord, timelord_new_peak)
            if peer is None:
                await self.server.send_to_all([msg], NodeType.TIMELORD)
            else:
                await self.server.send_to_specific([msg], peer.peer_node_id)
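
    # The two chain walks above compute (a) the total iterations of the last
    # challenge block or end of sub-slot, and (b) whether a sub-epoch summary is
    # due but not yet included; the timelord needs both to keep its VDFs on the
    # correct challenge.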

    async def synced(self, block_is_current_at: Optional[uint64] = None) -> bool:
        if block_is_current_at is None:
            block_is_current_at = uint64(int(time.time() - 60 * 7))
        if "simulator" in str(self.config.get("selected_network")):
            return True  # sim is always synced because it has no peers
        curr: Optional[BlockRecord] = self.blockchain.get_peak()
        if curr is None:
            return False

        while curr is not None and not curr.is_transaction_block:
            curr = self.blockchain.try_block_record(curr.prev_hash)

        if (
            curr is None
            or curr.timestamp is None
            or curr.timestamp < block_is_current_at
            or self.sync_store.get_sync_mode()
        ):
            return False
        else:
            return True
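
    # "Synced" here means our most recent transaction block carries a timestamp no
    # older than roughly 7 minutes (60 * 7 seconds) and we are not in sync mode.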

    async def on_connect(self, connection: WSChiaConnection) -> None:
        """
        Whenever we connect to another node / wallet, send them our current heads. Also send heads to farmers
        and challenges to timelords.
        """
        self._state_changed("add_connection")
        self._state_changed("sync_mode")
        if self.full_node_peers is not None:
            asyncio.create_task(self.full_node_peers.on_connect(connection))

        if self.initialized is False:
            return None

        if connection.connection_type is NodeType.FULL_NODE:
            # Send filter to node and request mempool items that are not in it (Only if we are currently synced)
            synced = await self.synced()
            peak_height = self.blockchain.get_peak_height()
            if synced and peak_height is not None:
                my_filter = self.mempool_manager.get_filter()
                mempool_request = full_node_protocol.RequestMempoolTransactions(my_filter)
                msg = make_msg(ProtocolMessageTypes.request_mempool_transactions, mempool_request)
                await connection.send_message(msg)

        peak_full: Optional[FullBlock] = await self.blockchain.get_full_peak()
        if peak_full is not None:
            peak: BlockRecord = self.blockchain.block_record(peak_full.header_hash)
            if connection.connection_type is NodeType.FULL_NODE:
                request_node = full_node_protocol.NewPeak(
                    peak.header_hash,
                    peak.height,
                    peak.weight,
                    peak.height,
                    peak_full.reward_chain_block.get_unfinished().get_hash(),
                )
                await connection.send_message(make_msg(ProtocolMessageTypes.new_peak, request_node))
            elif connection.connection_type is NodeType.WALLET:
                # If connected to a wallet, send the Peak
                request_wallet = wallet_protocol.NewPeakWallet(
                    peak.header_hash,
                    peak.height,
                    peak.weight,
                    peak.height,
                )
                await connection.send_message(make_msg(ProtocolMessageTypes.new_peak_wallet, request_wallet))
            elif connection.connection_type is NodeType.TIMELORD:
                await self.send_peak_to_timelords()
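
    # on_connect pushes our current peak immediately, so a freshly connected full
    # node or wallet can decide right away whether it needs to sync from us.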

    async def on_disconnect(self, connection: WSChiaConnection) -> None:
        self.log.info(f"peer disconnected {connection.get_peer_logging()}")
        self._state_changed("close_connection")
        self._state_changed("sync_mode")
        if self.sync_store is not None:
            self.sync_store.peer_disconnected(connection.peer_node_id)
        # Remove all puzzle-hash and coin-id subscriptions for this peer
        self.subscriptions.remove_peer(connection.peer_node_id)

    async def _sync(self) -> None:
        """
        Performs a full sync of the blockchain up to the peak.
        - Wait a few seconds for peers to send us their peaks
        - Select the heaviest peak, and request a weight proof from a peer with that peak
        - Validate the weight proof, and disconnect from the peer if invalid
        - Find the fork point to see where to start downloading blocks
        - Download blocks in batch (and in parallel) and verify them one at a time
        - Disconnect peers that provide invalid blocks or don't have the blocks
        """
        # Ensure we are only syncing once and not double calling this method
        if self.sync_store.get_sync_mode():
            return None

        if self.sync_store.get_long_sync():
            self.log.debug("already in long sync")
            return None

        self.sync_store.set_long_sync(True)
        self.log.debug("long sync started")
        try:
            self.log.info("Starting to perform sync.")

            # Wait until we have 3 peaks or up to a max of 30 seconds
            max_iterations = int(self.config.get("max_sync_wait", 30)) * 10

            self.log.info(f"Waiting to receive peaks from peers. (timeout: {max_iterations / 10}s)")
            peaks = []
            for i in range(max_iterations):
                peaks = [peak.header_hash for peak in self.sync_store.get_peak_of_each_peer().values()]
                if len(self.sync_store.get_peers_that_have_peak(peaks)) < 3:
                    if self._shut_down:
                        return None
                    await asyncio.sleep(0.1)
                    continue
                break

            self.log.info(f"Collected a total of {len(peaks)} peaks.")

            # Based on responses from peers about the current peaks, see which peak is the heaviest
            # (similar to longest chain rule).
            target_peak = self.sync_store.get_heaviest_peak()

            if target_peak is None:
                raise RuntimeError("Not performing sync, no peaks collected")

            self.sync_store.target_peak = target_peak

            self.log.info(f"Selected peak {target_peak}")
            # Check which peers are updated to this height
            peers = self.server.get_connections(NodeType.FULL_NODE)
            coroutines = []
            for peer in peers:
                coroutines.append(
                    peer.call_api(
                        FullNodeAPI.request_block,
                        full_node_protocol.RequestBlock(target_peak.height, True),
                        timeout=10,
                    )
                )
            for i, target_peak_response in enumerate(await asyncio.gather(*coroutines)):
                if target_peak_response is not None and isinstance(target_peak_response, RespondBlock):
                    self.sync_store.peer_has_block(
                        target_peak.header_hash, peers[i].peer_node_id, target_peak.weight, target_peak.height, False
                    )
            # TODO: disconnect from peer which gave us the heaviest_peak, if nobody has the peak

            fork_point, summaries = await self.request_validate_wp(
                target_peak.header_hash, target_peak.height, target_peak.weight
            )

            # Ensures that the fork point does not change
            async with self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.high):
                await self.blockchain.warmup(fork_point)
                await self.sync_from_fork_point(fork_point, target_peak.height, target_peak.header_hash, summaries)