Commit 9e7d92a

logging tweaks
dyoform committed Jul 20, 2022
1 parent 074123c commit 9e7d92a
Showing 12 changed files with 83 additions and 61 deletions.
10 changes: 5 additions & 5 deletions btc.go
@@ -33,7 +33,7 @@ var BTCInfo struct {
func btc_init() {
BTCInfo.Enabled = *btc_peer != ""
if !BTCInfo.Enabled {
LogStatus("btc", "mining disabled (no peer configured)")
log_status("btc", "mining disabled (no peer configured)")
return
}

@@ -42,7 +42,7 @@ func btc_init() {
BTCInfo.RestClient.Timeout = time.Second

if err := direct_check_path(*btc_data); err != nil {
LogStatus("btc", "direct mining disabled (%s)", err.Error())
log_status("btc", "direct mining disabled (%s)", err.Error())
} else {
BTCInfo.DirectPath = *btc_data
}
@@ -65,7 +65,7 @@ func btc_get_chains() (err error) {
func btc_sync() {
//sync chain info with our BTC peer
if err := btc_get_chains(); err != nil {
LogError("btc", "failed to get chains (%s)", err.Error())
log_error("btc", "failed to get chains (%s)", err.Error())
return //cant connect to peer
}

@@ -76,7 +76,7 @@ func btc_sync() {
//get block delta for displaying mining progress to the user
var delta int64 = int64(BTCInfo.Chain.Height) - int64(COMBInfo.Height)

LogStatus("btc", "%d blocks behind...", delta)
log_status("btc", "%d blocks behind...", delta)

var blocks chan BlockData = make(chan BlockData)
var wait sync.Mutex
@@ -94,7 +94,7 @@ func btc_sync() {

var target [32]byte = BTCInfo.Chain.TopHash
if err := btc_get_block_range(target, uint64(delta), blocks); err != nil {
LogError("btc", "failed to get blocks (%s)", err.Error())
log_error("btc", "failed to get blocks (%s)", err.Error())
}
wait.Lock() //dont leave before neominer is finished (only a problem if we use a buffered channel)
}
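
The btc_sync hunk above streams blocks to the miner over an unbuffered channel and uses a sync.Mutex as a completion latch, so the function does not return until the consumer has drained everything. A minimal standalone sketch of that pattern (the BlockData field and the fetch/process names are illustrative, not the repo's exact code):

package main

import (
	"fmt"
	"sync"
)

// BlockData is a stand-in for the repo's block payload type.
type BlockData struct{ Height uint64 }

// process_blocks drains the channel, then releases the latch so the
// producer knows every delivered block has been handled.
func process_blocks(blocks <-chan BlockData, done *sync.Mutex) {
	defer done.Unlock()
	for b := range blocks {
		fmt.Println("processed block", b.Height)
	}
}

// fetch_blocks plays the role of btc_get_block_range: it produces blocks,
// closes the channel, then waits on the latch before returning.
func fetch_blocks(count uint64) {
	var blocks chan BlockData = make(chan BlockData) // unbuffered, as in btc_sync
	var wait sync.Mutex

	wait.Lock() // held until process_blocks finishes
	go process_blocks(blocks, &wait)

	for i := uint64(0); i < count; i++ {
		blocks <- BlockData{Height: i}
	}
	close(blocks)

	wait.Lock() // blocks here until the consumer calls Unlock
}

func main() {
	fetch_blocks(5)
}
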
7 changes: 5 additions & 2 deletions btc_direct.go
@@ -13,7 +13,7 @@ type RawData map[[32]byte]*BlockData
func direct_parse_block_file(data []byte, blocks *RawData, path string) {
f, err := os.Open(path)
if err != nil {
LogError("direct", "cant open file %s (%s)", path, err.Error())
log_error("direct", "cant open file %s (%s)", path, err.Error())
return
}
stats, _ := f.Stat()
@@ -97,10 +97,13 @@ func direct_load_trace(blocks *RawData, path string, target [32]byte, length uin
//we preallocate and pass in a 128mb buffer to read the block file into (block_data)
direct_parse_block_file(block_data, blocks, block_files[b])

log_status("direct", "processed %s", block_files[b])

//now see if we have a valid chain loaded (from target to any block in history)
chain = direct_trace_chain(blocks, target, length)

if len(chain) != 0 {
log_status("direct", "chain connected. finished")
break //valid chain found
}
}
@@ -123,7 +126,7 @@ func direct_check_path(path string) (err error) {
return fmt.Errorf("no block files found")
}

LogStatus("direct", "found %d block files", len(block_files))
log_status("direct", "found %d block files", len(block_files))
return nil
}
func direct_get_block_range(path string, target [32]byte, length uint64, out chan<- BlockData) (err error) {
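
direct_check_path only enables direct mining when it can find block files under the configured data directory, and btc_init falls back to peer-based mining otherwise. A rough sketch of that probe, assuming Bitcoin Core's usual blocks/blk*.dat layout (the repo's actual matching rules may differ):

package main

import (
	"fmt"
	"path/filepath"
)

// check_block_dir reports how many block files a data directory holds,
// or an error when none are found (mirroring "no block files found").
func check_block_dir(path string) (int, error) {
	files, err := filepath.Glob(filepath.Join(path, "blocks", "blk*.dat"))
	if err != nil {
		return 0, err
	}
	if len(files) == 0 {
		return 0, fmt.Errorf("no block files found")
	}
	return len(files), nil
}

func main() {
	if n, err := check_block_dir("/path/to/bitcoin/datadir"); err != nil {
		fmt.Println("direct mining disabled:", err)
	} else {
		fmt.Println("found", n, "block files")
	}
}
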
9 changes: 7 additions & 2 deletions btc_rest.go
@@ -39,7 +39,7 @@ func rest_trace_chain(client *http.Client, url string, target [32]byte, length u
return nil, err
}

LogInfo("rest", "tracing %X", hash)
log_info("rest", "tracing %X", hash)

//just for the end user, this wont factor in any reorgs
var progress float64 = (float64(len(chain)) / float64(length)) * 100.0
@@ -61,10 +61,15 @@ func rest_get_block_range(client *http.Client, url string, target [32]byte, leng

//gets a list of blocks that connect the target to a known block (does not have to be the current chain tip)
//every block in this list is unknown to combcore

log_status("rest", "tracing chain...")

if chain, err = rest_trace_chain(client, url, target, length); err != nil {
return err
}

log_status("rest", "getting blocks...")

for i, h := range chain {
if block, err = rest_get_block(client, url, h); err != nil {
return err
@@ -88,7 +93,7 @@ func rest_get_block(client *http.Client, url string, hash [32]byte) (block Block
btc_parse_block(raw_data, raw_block)

if raw_block.Hash != hash {
LogPanic("rest", "recieved wrong block %X != %X", raw_block.Hash, hash)
log_panic("rest", "recieved wrong block %X != %X", raw_block.Hash, hash)
}

block.Hash = raw_block.Hash
30 changes: 16 additions & 14 deletions combcore.go
@@ -32,10 +32,12 @@ func setup_graceful_shutdown() {
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func() {
<-c
LogStatus("combcore", "terminate signal detected. shutting down...")
log_status("combcore", "terminate signal detected. shutting down...")
ingest_write()
critical.Lock()
db.Close()
shutdown.Unlock()
+ close_log_file()
os.Exit(-3)
}()
shutdown.Lock()
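
The shutdown hunk above wires a signal handler that flushes the ingest batch, closes the database, releases the shutdown latch and now also closes the log file before exiting. The underlying os/signal pattern, reduced to a self-contained sketch (the cleanup body is a placeholder for ingest_write, db.Close and close_log_file):

package main

import (
	"log"
	"os"
	"os/signal"
	"sync"
	"syscall"
)

var shutdown sync.Mutex

func setup_graceful_shutdown() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-c // wake up on SIGINT or SIGTERM
		log.Println("terminate signal detected. shutting down...")
		// flush pending state and release handles here, mirroring
		// ingest_write(), db.Close() and close_log_file() in the hunk above
		shutdown.Unlock() // let any goroutine waiting on shutdown proceed
		os.Exit(-3)
	}()
	shutdown.Lock() // taken once; released only by the handler above
}

func main() {
	setup_graceful_shutdown()
	select {} // block forever; Ctrl-C triggers the handler
}
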
@@ -64,7 +66,7 @@ func combcore_set_network() {

COMBInfo.Network = *comb_network

LogStatus("combcore", "loading in %s mode", COMBInfo.Network)
log_status("combcore", "loading in %s mode", COMBInfo.Network)

//every difference between the networks is here (minus whats in libcomb)
switch COMBInfo.Network {
@@ -92,7 +94,7 @@ func combcore_set_network() {
COMBInfo.Prefix["decider"] = "\\purse\\data\\"
libcomb.SwitchToTestnet()
default:
LogPanic("combcore", "unknown network %s", COMBInfo.Network)
log_panic("combcore", "unknown network %s", COMBInfo.Network)
}

libcomb.SetHeight(COMBInfo.Height)
@@ -108,12 +110,12 @@ func combcore_process_block(block Block) (err error) {
}

if !DBInfo.InitialLoad {
LogInfo("combcore", "processing %d", block.Metadata.Height)
log_info("combcore", "processing %d", block.Metadata.Height)
}

if block.Metadata.Previous != COMBInfo.Hash { //sanity check
LogError("combcore", "%d %X %d %X (%X)", COMBInfo.Height, COMBInfo.Hash, block.Metadata.Height, block.Metadata.Hash, block.Metadata.Previous)
LogError("combcore", "sanity check failed, chain is broken")
log_error("combcore", "%d %X %d %X (%X)", COMBInfo.Height, COMBInfo.Hash, block.Metadata.Height, block.Metadata.Hash, block.Metadata.Previous)
log_error("combcore", "sanity check failed, chain is broken")
}

var lib_block libcomb.Block
@@ -125,8 +127,8 @@

COMBInfo.Height = libcomb.GetHeight()
if COMBInfo.Height != block.Metadata.Height { //sanity check
LogError("combcore", "%d %d %X\n", COMBInfo.Height, block.Metadata.Height, block.Metadata.Hash)
LogError("combcore", "sanity check failed, height mismatch")
log_error("combcore", "%d %d %X\n", COMBInfo.Height, block.Metadata.Height, block.Metadata.Hash)
log_error("combcore", "sanity check failed, height mismatch")
}
COMBInfo.Chain[block.Metadata.Hash] = COMBInfo.Hash
COMBInfo.Hash = block.Metadata.Hash
@@ -142,21 +144,21 @@ func combcore_reorg(target [32]byte) {
var ok bool
var metadata = db_get_block_metadata_by_hash(target)

LogStatus("combcore", "reorg encountered, rolling back to block %d", metadata.Height)
log_status("combcore", "reorg encountered, rolling back to block %d", metadata.Height)

LogStatus("combcore", "tracing back...")
log_status("combcore", "tracing back...")
//trace back our in-memory chain
for COMBInfo.Hash != target {
if COMBInfo.Hash, ok = COMBInfo.Chain[COMBInfo.Hash]; !ok {
LogPanic("combcore", "reorg past checkpoint is not possible")
log_panic("combcore", "reorg past checkpoint is not possible")
}
}

LogStatus("combcore", "removing blocks from database...")
log_status("combcore", "removing blocks from database...")
//remove reorg'd blocks from the db
db_remove_blocks_after(metadata.Height + 1)

LogStatus("combcore", "unloading blocks...")
log_status("combcore", "unloading blocks...")
//unload libcomb to the target height
libcomb.GetLock()
for COMBInfo.Height != metadata.Height {
Expand All @@ -165,5 +167,5 @@ func combcore_reorg(target [32]byte) {
libcomb.FinishReorg()
libcomb.ReleaseLock()

LogStatus("combcore", "finished at %X (%d)", COMBInfo.Hash, COMBInfo.Height)
log_status("combcore", "finished at %X (%d)", COMBInfo.Hash, COMBInfo.Height)
}
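
combcore_reorg relies on COMBInfo.Chain being an in-memory map from each block hash to its parent hash: it pops hashes off the tip until it reaches the reorg target, and panics if the target is not an ancestor (a reorg past the checkpoint). The walk-back on its own, as a small sketch with illustrative names:

package main

import "fmt"

// chain maps block hash -> previous block hash, like COMBInfo.Chain.
// walk_back returns the hashes popped off the tip until target is reached,
// or an error if target is not an ancestor of tip.
func walk_back(chain map[[32]byte][32]byte, tip, target [32]byte) ([][32]byte, error) {
	var removed [][32]byte
	for tip != target {
		prev, ok := chain[tip]
		if !ok {
			return nil, fmt.Errorf("reorg past checkpoint is not possible")
		}
		removed = append(removed, tip)
		tip = prev
	}
	return removed, nil
}

func main() {
	var a, b, c [32]byte
	a[0], b[0], c[0] = 1, 2, 3
	chain := map[[32]byte][32]byte{b: a, c: b} // a <- b <- c
	removed, err := walk_back(chain, c, a)
	fmt.Println(len(removed), err) // 2 <nil>
}
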
4 changes: 2 additions & 2 deletions control.go
@@ -476,7 +476,7 @@ func (c *Control) DumpP2WSHCount(args *string, reply *struct{}) (err error) {
return fmt.Errorf("specify file name")
}

LogStatus("control", "writing to %s", *args)
log_status("control", "writing to %s", *args)

f, _ := os.Create(*args)
wait.Lock()
@@ -493,6 +493,6 @@ func (c *Control) DumpP2WSHCount(args *string, reply *struct{}) (err error) {
wait.Lock()
f.Close()

LogStatus("control", "finished")
log_status("control", "finished")
return nil
}
10 changes: 5 additions & 5 deletions db.go
@@ -362,7 +362,7 @@ func db_load() {
var fingerprint [32]byte = db_compute_block_fingerprint(block.Commits)
if block.Metadata.Fingerprint != fingerprint {
//recovery not implemented yet
LogPanic("db", "fingerprint mismatch on block %d (%X != %X)", block.Metadata.Height, block.Metadata.Fingerprint, fingerprint)
log_panic("db", "fingerprint mismatch on block %d (%X != %X)", block.Metadata.Height, block.Metadata.Fingerprint, fingerprint)
}
combcore_process_block(block)
count++
@@ -372,7 +372,7 @@ func db_load() {
db_load_blocks(0, (^uint64(0))-1, blocks)
wait.Lock()

LogStatus("db", "loaded %d blocks", count)
log_status("db", "loaded %d blocks", count)
}

func db_new() {
@@ -387,16 +387,16 @@ func db_new() {

func db_start() {
if db_is_new {
LogStatus("db", "new database created (version %d)", DB_CURRENT_VERSION)
log_status("db", "new database created (version %d)", DB_CURRENT_VERSION)
db_new()
return
}

LogStatus("db", "started. loading...")
log_status("db", "started. loading...")

DBInfo.Version = db_get_version()
if DBInfo.Version != DB_CURRENT_VERSION {
LogPanic("db", "cannot load legacy db")
log_panic("db", "cannot load legacy db")
}

db_load()
18 changes: 11 additions & 7 deletions ingest.go
@@ -11,16 +11,20 @@ var IngestInfo struct {
}

func ingest_init() {
- IngestInfo.BatchCapacity = 10000
+ IngestInfo.BatchCapacity = 1000
IngestInfo.BatchCached = 0
IngestInfo.Batch = new(leveldb.Batch)
}

func ingest_write() {
+ if IngestInfo.BatchCached == 0 {
+ return
+ }
if err := db_write(IngestInfo.Batch); err != nil {
LogPanic("ingest", "write batch failed (%s)", err.Error())
log_panic("ingest", "write batch failed (%s)", err.Error())
return
}
log_status("ingest", "height %d", COMBInfo.Height)
IngestInfo.BatchCached = 0
}

@@ -34,13 +38,13 @@ func ingest_process_block(block_data BlockData) (reorg bool) {

//check if we already have this block
if _, ok := COMBInfo.Chain[block.Metadata.Hash]; ok {
LogInfo("ingest", "block discarded %X", block.Metadata.Hash)
log_status("ingest", "block discarded %X", block.Metadata.Hash)
return
}

//check that we have the previous block
if _, ok := COMBInfo.Chain[block.Metadata.Previous]; !ok {
LogPanic("ingest", "chain broken, mining has fucked up %X, %X", block.Metadata.Hash, block.Metadata.Previous)
log_panic("ingest", "chain broken, mining has fucked up %X, %X", block.Metadata.Hash, block.Metadata.Previous)
}

//if the previous block isnt the top block its a reorg
@@ -53,7 +57,7 @@ func ingest_process_block(block_data BlockData) (reorg bool) {

//the previous block should now be the top block
if block.Metadata.Previous != COMBInfo.Hash {
LogPanic("ingest", "reorg failed! %X != %X", block.Metadata.Previous, COMBInfo.Hash)
log_panic("ingest", "reorg failed! %X != %X", block.Metadata.Previous, COMBInfo.Hash)
}
}

@@ -63,12 +67,12 @@ func ingest_process_block(block_data BlockData) (reorg bool) {

//this doesnt touch the disk yet, just gets added to the current batch
if err = db_process_block(IngestInfo.Batch, block); err != nil {
LogPanic("ingest", "store block failed (%s)", err.Error())
log_panic("ingest", "store block failed (%s)", err.Error())
return
}
IngestInfo.BatchCached++
if err = combcore_process_block(block); err != nil {
LogPanic("ingest", "process block failed (%s)", err.Error())
log_panic("ingest", "process block failed (%s)", err.Error())
}

if IngestInfo.BatchCached >= IngestInfo.BatchCapacity {
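
ingest.go stages writes in a goleveldb Batch and only touches disk once BatchCapacity blocks have accumulated; this commit drops the threshold from 10000 to 1000, makes ingest_write a no-op on an empty batch, and logs the height at each flush. A condensed sketch of that batching pattern (the key/value scheme and the ingester wrapper are made up for illustration; db_write in the repo may do more than a plain Write):

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

const batchCapacity = 1000 // flush threshold, as in IngestInfo.BatchCapacity

type ingester struct {
	db     *leveldb.DB
	batch  *leveldb.Batch
	cached int
}

// put stages one key/value pair; nothing touches disk until flush.
func (in *ingester) put(key, value []byte) {
	in.batch.Put(key, value)
	in.cached++
	if in.cached >= batchCapacity {
		in.flush()
	}
}

// flush writes the whole batch in one leveldb write, like ingest_write.
func (in *ingester) flush() {
	if in.cached == 0 {
		return // nothing staged, mirroring the new early-return guard
	}
	if err := in.db.Write(in.batch, nil); err != nil {
		log.Panicf("write batch failed (%s)", err)
	}
	in.batch.Reset()
	in.cached = 0
}

func main() {
	db, err := leveldb.OpenFile("example.db", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	in := &ingester{db: db, batch: new(leveldb.Batch)}
	for i := 0; i < 2500; i++ {
		in.put([]byte(fmt.Sprintf("block-%06d", i)), []byte("payload"))
	}
	in.flush() // flush the remainder at shutdown, like the signal handler does
}
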
26 changes: 17 additions & 9 deletions log.go
@@ -7,25 +7,33 @@ import (
"os"
)

- func SetLogFile(file string) {
- f, _ := os.OpenFile(file, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
- defer f.Close()
- wrt := io.MultiWriter(os.Stdout, f)
+ var LoggingInfo struct {
+ file *os.File
+ }
+
+ func set_log_file(path string) {
+ LoggingInfo.file, _ = os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
+ wrt := io.MultiWriter(os.Stdout, LoggingInfo.file)
log.SetOutput(wrt)
}

- func LogError(section string, format string, a ...any) {
- log.Printf(fmt.Sprintf("(%s) %s", section, format), a...)
+ func close_log_file() {
+ LoggingInfo.file.Close()
}

- func LogStatus(section string, format string, a ...any) {
+ func log_error(section string, format string, a ...any) {
log.Printf(fmt.Sprintf("(%s) %s", section, format), a...)
}

- func LogInfo(section string, format string, a ...any) {
+ func log_status(section string, format string, a ...any) {
log.Printf(fmt.Sprintf("(%s) %s", section, format), a...)
}

- func LogPanic(section string, format string, a ...any) {
+ func log_info(section string, format string, a ...any) {
+ //TODO: add option to enable log spam
+ //log.Printf(fmt.Sprintf("(%s) %s", section, format), a...)
+ }
+
+ func log_panic(section string, format string, a ...any) {
log.Printf(fmt.Sprintf("(%s) %s", section, format), a...)
}
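
The rewritten log.go keeps the opened *os.File in a package-level struct so the MultiWriter target stays valid for the life of the process and can be closed explicitly from the shutdown path; the old SetLogFile deferred f.Close() inside the setter, which would close the file as soon as the call returned. A self-contained version of the new scheme for reference (the demo call in main and the file name are illustrative):

package main

import (
	"fmt"
	"io"
	"log"
	"os"
)

var loggingInfo struct {
	file *os.File
}

// set_log_file mirrors log.go: tee every log line to stdout and the file,
// and keep the handle around instead of closing it with a defer.
func set_log_file(path string) {
	loggingInfo.file, _ = os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	log.SetOutput(io.MultiWriter(os.Stdout, loggingInfo.file))
}

// close_log_file releases the handle at shutdown.
func close_log_file() {
	loggingInfo.file.Close()
}

func log_status(section string, format string, a ...any) {
	log.Printf(fmt.Sprintf("(%s) %s", section, format), a...)
}

func main() {
	set_log_file("example.log")
	defer close_log_file()
	log_status("demo", "height %d", 42)
}
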
4 changes: 2 additions & 2 deletions main.go
@@ -5,7 +5,7 @@ import (
)

func main() {
SetLogFile("combcore.log")
set_log_file("combcore.log")

var err error

@@ -17,7 +17,7 @@ func main() {
btc_init()

if err = db_open(); err != nil {
LogPanic("db", "failed to open (%s)", err.Error())
log_panic("db", "failed to open (%s)", err.Error())
}

rpc_start()
