Commit 2030b4d

multi: Fix several misspellings in the comments.
Contains the following upstream commits:
- ef9c50b
- eb882f3

In addition to merging the fixes in the commits, this also fixes a few
more misspellings that were introduced in the new Decred code.
davecgh committed May 30, 2016
2 parents 0aeccc0 + eb882f3 commit 2030b4d
Showing 44 changed files with 105 additions and 105 deletions.
2 changes: 1 addition & 1 deletion addrmgr/addrmanager.go
@@ -886,7 +886,7 @@ func (a *AddrManager) Good(addr *wire.NetAddress) {
addrKey := NetAddressKey(addr)
oldBucket := -1
for i := range a.addrNew {
// we check for existance so we can record the first one
// we check for existence so we can record the first one
if _, ok := a.addrNew[i][addrKey]; ok {
delete(a.addrNew[i], addrKey)
ka.refs--
2 changes: 1 addition & 1 deletion addrmgr/addrmanager_test.go
@@ -17,7 +17,7 @@ import (
"github.com/decred/dcrd/wire"
)

// naTest is used to describe a test to be perfomed against the NetAddressKey
// naTest is used to describe a test to be performed against the NetAddressKey
// method.
type naTest struct {
in wire.NetAddress
2 changes: 1 addition & 1 deletion blockchain/accept.go
@@ -91,7 +91,7 @@ func IsFinalizedTransaction(tx *dcrutil.Tx, blockHeight int64,
return true
}

// At this point, the transaction's lock time hasn't occured yet, but
// At this point, the transaction's lock time hasn't occurred yet, but
// the transaction might still be finalized if the sequence number
// for all transaction inputs is maxed out.
for _, txIn := range msgTx.TxIn {
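The corrected comment in blockchain/accept.go above refers to the lock-time escape hatch in IsFinalizedTransaction: even when the lock time has not been reached, the transaction still counts as final if every input's sequence number is maxed out. A minimal sketch of just that check, assuming dcrd's wire package (the real function first compares the lock time against the block height or timestamp):

package sketch

import (
	"math"

	"github.com/decred/dcrd/wire"
)

// allSequencesMaxed reports whether every input's sequence number is maxed
// out, which finalizes a transaction even though its lock time has not yet
// been reached.
func allSequencesMaxed(msgTx *wire.MsgTx) bool {
	for _, txIn := range msgTx.TxIn {
		if txIn.Sequence != math.MaxUint32 {
			return false
		}
	}
	return true
}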
2 changes: 1 addition & 1 deletion blockchain/difficulty.go
@@ -142,7 +142,7 @@ func BigToCompact(n *big.Int) uint32 {
// CalcWork calculates a work value from difficulty bits. Decred increases
// the difficulty for generating a block by decreasing the value which the
// generated hash must be less than. This difficulty target is stored in each
// block header using a compact representation as described in the documenation
// block header using a compact representation as described in the documentation
// for CompactToBig. The main chain is selected by choosing the chain that has
// the most proof of work (highest difficulty). Since a lower target difficulty
// value equates to higher actual difficulty, the work value which will be
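As background for the corrected CalcWork comment above: the compact representation it points to packs a 256-bit target into 32 bits, with the high byte acting as a base-256 exponent and the low 23 bits as the mantissa. A sketch of the expansion described in the CompactToBig documentation (simplified; bit 23 is a sign bit that is normally unset for targets):

package sketch

import "math/big"

// compactToBig expands a compact difficulty value into the full target that
// a generated block hash must be less than.
func compactToBig(compact uint32) *big.Int {
	mantissa := compact & 0x007fffff
	isNegative := compact&0x00800000 != 0
	exponent := uint(compact >> 24)

	var bn *big.Int
	if exponent <= 3 {
		mantissa >>= 8 * (3 - exponent)
		bn = big.NewInt(int64(mantissa))
	} else {
		bn = big.NewInt(int64(mantissa))
		bn.Lsh(bn, 8*(exponent-3))
	}
	if isNegative {
		bn = bn.Neg(bn)
	}
	return bn
}

CalcWork then derives the work value as roughly 2^256 divided by (target + 1), which is why a lower target equates to more work.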
2 changes: 1 addition & 1 deletion blockchain/internal_test.go
@@ -25,7 +25,7 @@ func TstTimeSorter(times []time.Time) sort.Interface {
}

// TstSetMaxMedianTimeEntries makes the ability to set the maximum number of
// median tiem entries available to the test package.
// median time entries available to the test package.
func TstSetMaxMedianTimeEntries(val int) {
maxMedianTimeEntries = val
}
14 changes: 7 additions & 7 deletions blockchain/stake/ticketdb.go
@@ -562,7 +562,7 @@ func (tmdb *TicketDB) removeLiveTicket(ticket *TicketData) error {
"delete does not exist!", ticket.SStxHash)
}

// Make sure that the tickets are indentical in the unlikely case of a hash
// Make sure that the tickets are identical in the unlikely case of a hash
// collision
if *tmdb.maps.ticketMap[ticket.Prefix][ticket.SStxHash] != *ticket {
return fmt.Errorf("TicketDB err @ removeLiveTicket: ticket " +
@@ -590,7 +590,7 @@ func (tmdb *TicketDB) removeSpentTicket(spendHeight int64, ticket *TicketData) e
"delete does not exist! %v", ticket.SStxHash)
}

// Make sure that the tickets are indentical in the unlikely case of a hash
// Make sure that the tickets are identical in the unlikely case of a hash
// collision
if *tmdb.maps.spentTicketMap[spendHeight][ticket.SStxHash] != *ticket {
return fmt.Errorf("TicketDB err @ removeSpentTicket: ticket hash " +
@@ -616,7 +616,7 @@ func (tmdb *TicketDB) removeMissedTicket(ticket *TicketData) error {
"delete does not exist! %v", ticket.SStxHash)
}

// Make sure that the tickets are indentical in the unlikely case of a hash
// Make sure that the tickets are identical in the unlikely case of a hash
// collision
if *tmdb.maps.missedTicketMap[ticket.SStxHash] != *ticket {
return fmt.Errorf("TicketDB err @ removeMissedTicket: ticket hash " +
@@ -643,7 +643,7 @@ func (tmdb *TicketDB) removeRevokedTicket(ticket *TicketData) error {
"delete does not exist! %v", ticket.SStxHash)
}

// Make sure that the tickets are indentical in the unlikely case of a hash
// Make sure that the tickets are identical in the unlikely case of a hash
// collision.
if *tmdb.maps.revokedTicketMap[ticket.SStxHash] != *ticket {
return fmt.Errorf("TicketDB err @ removeRevokedTicket: ticket hash " +
@@ -721,7 +721,7 @@ func (tmdb *TicketDB) CheckLiveTicket(txHash chainhash.Hash) (bool, error) {
}

// CheckMissedTicket checks for the existence of a missed ticket in the missed
// ticket map. Assumes missedTicketMap is intialized.
// ticket map. Assumes missedTicketMap is initialized.
//
// This function is safe for concurrent access.
func (tmdb *TicketDB) CheckMissedTicket(txHash chainhash.Hash) bool {
@@ -735,7 +735,7 @@ func (tmdb *TicketDB) CheckMissedTicket(txHash chainhash.Hash) bool {
}

// CheckRevokedTicket checks for the existence of a revoked ticket in the
// revoked ticket map. Assumes missedTicketMap is intialized.
// revoked ticket map. Assumes missedTicketMap is initialized.
//
// This function is safe for concurrent access.
func (tmdb *TicketDB) CheckRevokedTicket(txHash chainhash.Hash) bool {
@@ -1306,7 +1306,7 @@ func (tmdb *TicketDB) unspendTickets(height int64) (SStxMemMap, error) {
// getNewTicketsFromHeight loads a block from leveldb and parses SStx from it using
// chain/stake's IsSStx function.
// This is intended to be used to get ticket numbers from the MAIN CHAIN as
// decribed in the DB.
// described in the DB.
// SIDE CHAIN evaluation should be instantiated in package:chain.
//
// This function MUST be called with the tmdb lock held (for reads).
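All four of the corrected "identical in the unlikely case of a hash collision" comments above guard the same removal pattern: look the ticket up by its hash, confirm the stored entry is identical to the caller's copy, and only then delete it. A condensed sketch of that pattern under assumed import paths (the real methods operate on the TicketDB's internal prefix, spent, missed, and revoked maps):

package sketch

import (
	"fmt"

	"github.com/decred/dcrd/blockchain/stake"
	"github.com/decred/dcrd/chaincfg/chainhash"
)

// removeTicket deletes a ticket from the map only after verifying that an
// entry exists for its hash and that the stored data matches the ticket
// being removed, guarding against an unlikely hash collision.
func removeTicket(m map[chainhash.Hash]*stake.TicketData, ticket *stake.TicketData) error {
	stored, ok := m[ticket.SStxHash]
	if !ok {
		return fmt.Errorf("ticket %v to delete does not exist", ticket.SStxHash)
	}
	if *stored != *ticket {
		return fmt.Errorf("ticket %v does not match the stored entry", ticket.SStxHash)
	}
	delete(m, ticket.SStxHash)
	return nil
}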
4 changes: 2 additions & 2 deletions blockchain/validate.go
@@ -1371,10 +1371,10 @@ func CheckTransactionInputs(tx *dcrutil.Tx, txHeight int64, txStore TxStore,
// ----------------------------------------------------------------------------

// SSTX -----------------------------------------------------------------------
// 1. Check and make sure that the output amounts in the committments to the
// 1. Check and make sure that the output amounts in the commitments to the
// ticket are correctly calculated.

// 1. Check and make sure that the output amounts in the committments to the
// 1. Check and make sure that the output amounts in the commitments to the
// ticket are correctly calculated.
isSStx, _ := stake.IsSStx(tx)
if isSStx {
2 changes: 1 addition & 1 deletion blockmanager.go
@@ -2940,7 +2940,7 @@ func newBlockManager(s *server) (*blockManager, error) {
}
bmgrLog.Infof("Block index generation complete")

// Initialize the chain state now that the intial block node index has
// Initialize the chain state now that the initial block node index has
// been generated.

// Query the DB for the current winning ticket data.
4 changes: 2 additions & 2 deletions chainindexer.go
@@ -33,9 +33,9 @@ const (
// When in "CatchUp" mode, incoming requests to index newly solved
// blocks are backed up for later processing. Once we've finished
// catching up, we process these queued jobs, and then enter into
// "maintainence" mode.
// "maintenance" mode.
indexCatchUp indexState = iota
// When in "maintainence" mode, we have a single worker serially
// When in "maintenance" mode, we have a single worker serially
// processing incoming jobs to index newly solved blocks.
indexMaintain
)
2 changes: 1 addition & 1 deletion config.go
@@ -198,7 +198,7 @@ func supportedSubsystems() []string {
subsystems = append(subsystems, subsysID)
}

// Sort the subsytems for stable display.
// Sort the subsystems for stable display.
sort.Strings(subsystems)
return subsystems
}
6 changes: 3 additions & 3 deletions database/db.go
@@ -45,7 +45,7 @@ type Db interface {

// DropAfterBlockBySha will remove any blocks from the database after
// the given block. It terminates any existing transaction and performs
// its operations in an atomic transaction which is commited before
// its operations in an atomic transaction which is committed before
// the function returns.
DropAfterBlockBySha(*chainhash.Hash) (err error)

@@ -126,7 +126,7 @@ type Db interface {
// index information for a particular block height. Additionally, it
// will update the stored meta-data related to the curent tip of the
// addr index. These two operations are performed in an atomic
// transaction which is commited before the function returns.
// transaction which is committed before the function returns.
// Addresses are indexed by the raw bytes of their base58 decoded
// hash160.
UpdateAddrIndexForBlock(blkSha *chainhash.Hash, height int64,
@@ -219,7 +219,7 @@ func AddDBDriver(instance DriverDB) {
driverList = append(driverList, instance)
}

// CreateDB intializes and opens a database.
// CreateDB initializes and opens a database.
func CreateDB(dbtype string, args ...interface{}) (pbdb Db, err error) {
for _, drv := range driverList {
if drv.DbType == dbtype {
2 changes: 1 addition & 1 deletion database/ldb/tx.go
@@ -552,7 +552,7 @@ func (db *LevelDb) FetchTxsForAddr(addr dcrutil.Address, skip int,
// index information for a particular block height. Additionally, it
// will update the stored meta-data related to the curent tip of the
// addr index. These two operations are performed in an atomic
// transaction which is commited before the function returns.
// transaction which is committed before the function returns.
// Transactions indexed by address are stored with the following format:
// * prefix || hash160 || blockHeight || txoffset || txlen
// Indexes are stored purely in the key, with blank data for the actual value
2 changes: 1 addition & 1 deletion database2/doc.go
@@ -47,7 +47,7 @@ below.
Transactions
The Tx interface provides facilities for rolling back or commiting changes that
The Tx interface provides facilities for rolling back or committing changes that
took place while the transaction was active. It also provides the root metadata
bucket under which all keys, values, and nested buckets are stored. A
transaction can either be read-only or read-write and managed or unmanaged.
6 changes: 3 additions & 3 deletions database2/driver.go
@@ -59,9 +59,9 @@ func SupportedDrivers() []string {
return supportedDBs
}

// Create intializes and opens a database for the specified type. The arguments
// are specific to the database type driver. See the documentation for the
// database driver for further details.
// Create initializes and opens a database for the specified type. The
// arguments are specific to the database type driver. See the documentation
// for the database driver for further details.
//
// ErrDbUnknownType will be returned if the the database type is not registered.
func Create(dbType string, args ...interface{}) (DB, error) {
4 changes: 2 additions & 2 deletions database2/ffldb/interface_test.go
@@ -330,7 +330,7 @@ func testCursorInterface(tc *testContext, bucket database.Bucket) bool {
return false
}

// Ensure foward iteration works as expected after seeking.
// Ensure forward iteration works as expected after seeking.
middleIdx := (len(sortedValues) - 1) / 2
seekKey := sortedValues[middleIdx].key
curIdx = middleIdx
@@ -650,7 +650,7 @@ func testMetadataManualTxInterface(tc *testContext) bool {
//
// Otherwise, a read-write transaction is created, the values are
// written, standard bucket tests for read-write transactions are
// performed, and then the transaction is either commited or rolled
// performed, and then the transaction is either committed or rolled
// back depending on the flag.
bucket1Name := []byte("bucket1")
populateValues := func(writable, rollback bool, putValues []keyPair) bool {
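The "forward iteration works as expected after seeking" test above exercises the basic cursor pattern of the database2 interface: seek to a key, then walk forward until the bucket is exhausted. A usage sketch assuming the Cursor's Seek/Next/Key methods behave as in the upstream btcd database package (import path assumed):

package sketch

import database "github.com/decred/dcrd/database2"

// keysFrom collects every key at or after seekKey by seeking the cursor and
// then iterating forward until Next reports that no entries remain.
func keysFrom(bucket database.Bucket, seekKey []byte) [][]byte {
	var keys [][]byte
	c := bucket.Cursor()
	for ok := c.Seek(seekKey); ok; ok = c.Next() {
		keys = append(keys, c.Key())
	}
	return keys
}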
2 changes: 1 addition & 1 deletion database2/interface.go
@@ -451,7 +451,7 @@ type DB interface {
// Update invokes the passed function in the context of a managed
// read-write transaction. Any errors returned from the user-supplied
// function will cause the transaction to be rolled back and are
// returned from this function. Otherwise, the transaction is commited
// returned from this function. Otherwise, the transaction is committed
// when the user-supplied function returns a nil error.
//
// Calling Rollback or Commit on the transaction passed to the
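The corrected Update documentation above spells out the managed read-write contract: a nil return from the supplied closure commits the transaction, while any error rolls it back and is passed through to the caller. A hypothetical usage sketch (the bucket and key names are illustrative and the import path is assumed):

package sketch

import (
	"fmt"

	database "github.com/decred/dcrd/database2"
)

// storeValue writes one key/value pair under an existing metadata bucket
// inside a managed transaction: returning nil commits, returning an error
// rolls back.
func storeValue(db database.DB, key, value []byte) error {
	return db.Update(func(tx database.Tx) error {
		bucket := tx.Metadata().Bucket([]byte("examplebucket"))
		if bucket == nil {
			return fmt.Errorf("examplebucket does not exist")
		}
		return bucket.Put(key, value)
	})
}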
5 changes: 3 additions & 2 deletions dcrec/edwards/ciphering.go
@@ -61,8 +61,9 @@ func GenerateSharedSecret(privkey *PrivateKey, pubkey *PublicKey) []byte {
// HMAC [32]byte
// }
//
// The primary aim is to ensure byte compatibility with Pyelliptic. Additionaly,
// refer to section 5.8.1 of ANSI X9.63 for rationale on this format.
// The primary aim is to ensure byte compatibility with Pyelliptic.
// Additionally, refer to section 5.8.1 of ANSI X9.63 for rationale on this
// format.
func Encrypt(curve *TwistedEdwardsCurve, pubkey *PublicKey, in []byte) ([]byte,
error) {
ephemeral, err := GeneratePrivateKey(curve)
2 changes: 1 addition & 1 deletion dcrec/edwards/threshold.go
@@ -54,7 +54,7 @@ func CombinePubkeys(curve *TwistedEdwardsCurve,
}

// generateNoncePair deterministically generate a nonce pair for use in
// partial signing of a message. Returns a public key (nonce to dissemanate)
// partial signing of a message. Returns a public key (nonce to disseminate)
// and a private nonce to keep as a secret for the signer.
func generateNoncePair(curve *TwistedEdwardsCurve, msg []byte, priv []byte,
nonceFunction func(*TwistedEdwardsCurve, []byte, []byte, []byte,
4 changes: 2 additions & 2 deletions dcrec/secp256k1/ciphering.go
@@ -66,8 +66,8 @@ func GenerateSharedSecret(privkey *PrivateKey, pubkey *PublicKey) []byte {
// HMAC [32]byte
// }
//
// The primary aim is to ensure byte compatibility with Pyelliptic. Additionaly,
// refer to section 5.8.1 of ANSI X9.63 for rationale on this format.
// The primary aim is to ensure byte compatibility with Pyelliptic. Also, refer
// to section 5.8.1 of ANSI X9.63 for rationale on this format.
func Encrypt(pubkey *PublicKey, in []byte) ([]byte, error) {
ephemeral, err := GeneratePrivateKey(S256())
if err != nil {
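For context on the Encrypt documentation corrected above: the function is paired with a Decrypt in the same package for an ECIES-style round trip. A sketch assuming Decrypt mirrors Encrypt's signature, as in the upstream btcec package it was derived from:

package sketch

import "github.com/decred/dcrd/dcrec/secp256k1"

// sealAndOpen encrypts a message to the private key's own public key and
// immediately decrypts it again, illustrating how Encrypt and Decrypt are
// meant to be paired.
func sealAndOpen(priv *secp256k1.PrivateKey, msg []byte) ([]byte, error) {
	ciphertext, err := secp256k1.Encrypt(priv.PubKey(), msg)
	if err != nil {
		return nil, err
	}
	return secp256k1.Decrypt(priv, ciphertext)
}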
2 changes: 1 addition & 1 deletion dcrec/secp256k1/field.go
@@ -42,7 +42,7 @@ package secp256k1
// 3) Since we're dealing with 32-bit values, 64-bits of overflow is a
// reasonable choice for #2
// 4) Given the need for 256-bits of precision and the properties stated in #1,
// #2, and #3, the representation which best accomodates this is 10 uint32s
// #2, and #3, the representation which best accommodates this is 10 uint32s
// with base 2^26 (26 bits * 10 = 260 bits, so the final word only needs 22
// bits) which leaves the desired 64 bits (32 * 10 = 320, 320 - 256 = 64) for
// overflow
2 changes: 1 addition & 1 deletion dcrec/secp256k1/pubkey.go
@@ -27,7 +27,7 @@ func isOdd(a *big.Int) bool {
// the solution to use.
func DecompressPoint(curve *KoblitzCurve, x *big.Int, ybit bool) (*big.Int, error) {
// TODO(oga) This will probably only work for secp256k1 due to
// optimisations.
// optimizations.

// Y = +-sqrt(x^3 + B)
x3 := new(big.Int).Mul(x, x)
2 changes: 1 addition & 1 deletion dcrec/secp256k1/schnorr/threshold.go
@@ -70,7 +70,7 @@ func nonceRFC6979(privkey []byte, hash []byte, extra []byte,
}

// generateNoncePair deterministically generate a nonce pair for use in
// partial signing of a message. Returns a public key (nonce to dissemanate)
// partial signing of a message. Returns a public key (nonce to disseminate)
// and a private nonce to keep as a secret for the signer.
func generateNoncePair(curve *secp256k1.KoblitzCurve, msg []byte, priv []byte,
nonceFunction func([]byte, []byte, []byte, []byte) []byte, extra []byte,
6 changes: 3 additions & 3 deletions dcrjson/chainsvrresults.go
@@ -244,15 +244,15 @@ type GetNetTotalsResult struct {
TimeMillis int64 `json:"timemillis"`
}

// ScriptSig models a signature script. It is defined seperately since it only
// ScriptSig models a signature script. It is defined separately since it only
// applies to non-coinbase. Therefore the field in the Vin structure needs
// to be a pointer.
type ScriptSig struct {
Asm string `json:"asm"`
Hex string `json:"hex"`
}

// Vin models parts of the tx data. It is defined seperately since
// Vin models parts of the tx data. It is defined separately since
// getrawtransaction, decoderawtransaction, and searchrawtransaction use the
// same structure.
type Vin struct {
@@ -375,7 +375,7 @@ func (v *VinPrevOut) MarshalJSON() ([]byte, error) {
return json.Marshal(txStruct)
}

// Vout models parts of the tx data. It is defined seperately since both
// Vout models parts of the tx data. It is defined separately since both
// getrawtransaction and decoderawtransaction use the same structure.
type Vout struct {
Value float64 `json:"value"`
2 changes: 1 addition & 1 deletion dcrjson/cmdinfo.go
@@ -66,7 +66,7 @@ func subStructUsage(structType reflect.Type) string {
}

// Create the name/value entry for the field while considering
// the type of the field. Not all possibile types are covered
// the type of the field. Not all possible types are covered
// here and when one of the types not specifically covered is
// encountered, the field name is simply reused for the value.
fieldName := strings.ToLower(rtf.Name)
2 changes: 1 addition & 1 deletion dcrjson/cmdinfo_test.go
@@ -12,7 +12,7 @@ import (
"github.com/decred/dcrd/dcrjson"
)

// TestCmdMethod tests the CmdMethod function to ensure it retuns the expected
// TestCmdMethod tests the CmdMethod function to ensure it returns the expected
// methods and errors.
func TestCmdMethod(t *testing.T) {
t.Parallel()
2 changes: 1 addition & 1 deletion dcrjson/register_test.go
@@ -254,7 +254,7 @@ func TestRegisteredCmdMethods(t *testing.T) {
t.Fatal("RegisteredCmdMethods: no methods")
}

// Ensure the returnd methods are sorted.
// Ensure the returned methods are sorted.
sortedMethods := make([]string, len(methods))
copy(sortedMethods, methods)
sort.Sort(sort.StringSlice(sortedMethods))
2 changes: 1 addition & 1 deletion discovery.go
@@ -50,7 +50,7 @@ var (
)

// torLookupIP uses Tor to resolve DNS via the SOCKS extension they provide for
// resolution over the Tor network. Tor itself doesnt support ipv6 so this
// resolution over the Tor network. Tor itself doesn't support ipv6 so this
// doesn't either.
func torLookupIP(host, proxy string) ([]net.IP, error) {
conn, err := net.Dial("tcp", proxy)
2 changes: 1 addition & 1 deletion dynamicbanscore.go
@@ -125,7 +125,7 @@ func (s *dynamicBanScore) int(t time.Time) uint32 {

// increase increases the persistent, the decaying or both scores by the values
// passed as parameters. The resulting score is calculated as if the action was
// carried out at the point time represented by the third paramter. The
// carried out at the point time represented by the third parameter. The
// resulting score is returned.
//
// This function is not safe for concurrent access.
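The corrected increase documentation above belongs to a two-part ban score: a persistent component that never decays and a transient component that decays exponentially with time. A rough sketch of that shape (the half-life value and the precomputed decay table dcrd actually uses may differ; everything here is illustrative):

package sketch

import "math"

// halflife is the assumed number of seconds for the transient portion of the
// score to fall to half its value.
const halflife = 60.0

// banScore combines a non-decaying persistent score with a transient score
// that decays exponentially based on the seconds elapsed since it was set.
func banScore(persistent, transient uint32, secondsElapsed float64) uint32 {
	decay := math.Pow(0.5, secondsElapsed/halflife)
	return persistent + uint32(float64(transient)*decay)
}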
4 changes: 2 additions & 2 deletions log.go
@@ -30,7 +30,7 @@ const (
maxRejectReasonLen = 250
)

// Loggers per subsytem. Note that backendLog is a seelog logger that all of
// Loggers per subsystem. Note that backendLog is a seelog logger that all of
// the subsystem loggers route their messages to. When adding new subsystems,
// add a reference here, to the subsystemLoggers map, and the useLogger
// function.
@@ -146,7 +146,7 @@ func useLogger(subsystemID string, logger btclog.Logger) {
}

// initSeelogLogger initializes a new seelog logger that is used as the backend
// for all logging subsytems.
// for all logging subsystems.
func initSeelogLogger(logFile string) {
config := `
<seelog type="adaptive" mininterval="2000000" maxinterval="100000000"
