From cecc510b8be500c1c38e952fe6768c3d7bbb1435 Mon Sep 17 00:00:00 2001
From: hoangdv2429
Date: Sun, 10 Dec 2023 22:38:16 +0700
Subject: [PATCH 1/4] add vendor to gitignore

---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitignore b/.gitignore
index 7d7c0b98e3..098978578c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,3 +16,4 @@ square/testdata
 **/*.html
 .release-env
 **/*.DS_Store
+vendor
\ No newline at end of file

From de5f1830bdbdb1102453c475068ce10d4cc6e058 Mon Sep 17 00:00:00 2001
From: hoangdv2429
Date: Tue, 12 Dec 2023 22:38:16 +0700
Subject: [PATCH 2/4] typos

---
 app/ante/ante.go | 2 +-
 app/process_proposal.go | 2 +-
 app/test/fuzz_abci_test.go | 2 +-
 docs/architecture/adr-006-non-interactive-defaults.md | 2 +-
 docs/architecture/adr-012-sequence-length-encoding.md | 6 +++---
 docs/architecture/adr-014-versioned-namespaces.md | 6 +++---
 docs/architecture/adr-015-namespace-id-size.md | 6 +++---
 docs/architecture/adr-018-network-upgrades.md | 2 +-
 docs/architecture/adr-019-strict-inflation-schedule.md | 2 +-
 .../adr-020-deterministic-square-construction.md | 4 ++--
 pkg/appconsts/global_consts.go | 4 ++--
 pkg/inclusion/nmt_caching.go | 6 +++---
 pkg/shares/namespace.go | 2 +-
 pkg/square/builder.go | 2 +-
 pkg/square/square_test.go | 2 +-
 pkg/user/signer.go | 2 +-
 proto/celestia/core/v1/da/data_availability_header.pb.go | 2 +-
 proto/celestia/core/v1/da/data_availability_header.proto | 2 +-
 proto/tendermint/crypto/proof.proto | 2 +-
 scripts/build-run-single-node.sh | 2 +-
 specs/src/specs/block_validity_rules.md | 2 +-
 specs/src/specs/data_structures.md | 4 ++--
 specs/src/specs/params.md | 2 +-
 test/testground/README.md | 4 ++--
 test/testground/manifest.toml | 2 +-
 test/testground/network/configurators.go | 2 +-
 test/testground/network/consensus_node.go | 2 +-
 test/txsim/send.go | 2 +-
 test/util/malicious/hasher.go | 4 ++--
 test/util/malicious/test_app.go | 2 +-
 x/blob/README.md | 4 ++--
 x/blob/types/payforblob.go | 2 +-
 x/blobstream/README.md | 2 +-
 x/mint/types/constants.go | 2 +-
 34 files changed, 48 insertions(+), 48 deletions(-)

diff --git a/app/ante/ante.go b/app/ante/ante.go
index 1988d02158..f263159932 100644
--- a/app/ante/ante.go
+++ b/app/ante/ante.go
@@ -59,7 +59,7 @@ func NewAnteHandler(
 		blobante.NewMinGasPFBDecorator(blobKeeper),
 		// Ensure that the tx's total blob size is <= the max blob size.
 		blobante.NewMaxBlobSizeDecorator(blobKeeper),
-		// Ensure that tx's with a MsgSubmitProposal have atleast one proposal
+		// Ensure that tx's with a MsgSubmitProposal have at least one proposal
 		// message.
 		NewGovProposalDecorator(),
 		// Side effect: increment the nonce for all tx signers.
diff --git a/app/process_proposal.go b/app/process_proposal.go
index 68f7048058..8a2fe77a7a 100644
--- a/app/process_proposal.go
+++ b/app/process_proposal.go
@@ -77,7 +77,7 @@ func (app *App) ProcessProposal(req abci.RequestProcessProposal) (resp abci.Resp
 			// we need to increment the sequence for every transaction so that
 			// the signature check below is accurate. this error only gets hit
-			// if the account in question doens't exist.
+			// if the account in question doesn't exist.
 			sdkCtx, err = handler(sdkCtx, sdkTx, false)
 			if err != nil {
 				logInvalidPropBlockError(app.Logger(), req.Header, "failure to increment sequence", err)
diff --git a/app/test/fuzz_abci_test.go b/app/test/fuzz_abci_test.go
index e7b933a35d..b10cbe3501 100644
--- a/app/test/fuzz_abci_test.go
+++ b/app/test/fuzz_abci_test.go
@@ -40,7 +40,7 @@ func TestPrepareProposalConsistency(t *testing.T) {
 		iterations int
 	}
 	tests := []test{
-		// running these tests more than once in CI will sometimes timout, so we
+		// running these tests more than once in CI will sometimes time out, so we
 		// have to run them each once per square size. However, we can run these
 		// more locally by increasing the iterations.
 		{"many small single share single blob transactions", 1000, 1, 400, 1},
diff --git a/docs/architecture/adr-006-non-interactive-defaults.md b/docs/architecture/adr-006-non-interactive-defaults.md
index 9715234051..5ee230ec4b 100644
--- a/docs/architecture/adr-006-non-interactive-defaults.md
+++ b/docs/architecture/adr-006-non-interactive-defaults.md
@@ -289,7 +289,7 @@ func (app *App) PrepareProposal(req abci.RequestPrepareProposal) abci.ResponsePr
 The first major change is that we are making use of an intermediate data structure. It contains fields that are progressively and optionally used during the malleation process. This makes it easier to keep track of malleated transactions and their messages, prune transactions in the case that we go over the max square size, cache the decoded transactions avoiding excessive deserialization, and add metadata to malleated transactions after we malleate them. All while preserving the original ordering (from the prioritized mempool) of the transactions.

 ```go
-// parsedTx is an interanl struct that keeps track of potentially valid txs and
+// parsedTx is an internal struct that keeps track of potentially valid txs and
 // their wire messages if they have any.
 type parsedTx struct {
 	// the original raw bytes of the tx
diff --git a/docs/architecture/adr-012-sequence-length-encoding.md b/docs/architecture/adr-012-sequence-length-encoding.md
index 3494ecbed6..59825bd355 100644
--- a/docs/architecture/adr-012-sequence-length-encoding.md
+++ b/docs/architecture/adr-012-sequence-length-encoding.md
@@ -71,7 +71,7 @@ Cons

 ## Option E: Extend protobuf and introduce a fixed16 type

-Big endian uint32 seems equivalant to protobuf fixed32 but there is no fixed16. This option adds a fixed16 type to protobuf so that we can encode the sequence length as a fixed32 and the reserved bytes as a fixed16.
+Big endian uint32 seems equivalent to protobuf fixed32 but there is no fixed16. This option adds a fixed16 type to protobuf so that we can encode the sequence length as a fixed32 and the reserved bytes as a fixed16.

 ## Table

@@ -81,7 +81,7 @@ Big endian uint32 seems equivalant to protobuf fixed32 but there is no fixed16.
 | Option B | 4 byte padded varint | 2 byte padded varint |
 | Option C | 4 byte big endian uint32 | 2 byte padded varint |
 | Option D | 4 byte big endian uint32 | 4 byte big endian uint32 |
-| Option E | 4 byte big endian uint32 (equivalant to protobuf fixed32) | 2 byte protobuf fixed16 (doesn't exist) |
+| Option E | 4 byte big endian uint32 (equivalent to protobuf fixed32) | 2 byte protobuf fixed16 (doesn't exist) |

 ## Decision

@@ -96,7 +96,7 @@ Option D

 ### Neutral

 - All options retain the need for other language implementations to parse varints because the length delimiter that is prefixed to units in a compact share (e.g. a transaction) is still a varint.
-- This document assumes that an encoded big endian uint32 is equivalant to a protobuf fixed32
+- This document assumes that an encoded big endian uint32 is equivalent to a protobuf fixed32

 ## References
diff --git a/docs/architecture/adr-014-versioned-namespaces.md b/docs/architecture/adr-014-versioned-namespaces.md
index 778d8ab406..ca9dba8d88 100644
--- a/docs/architecture/adr-014-versioned-namespaces.md
+++ b/docs/architecture/adr-014-versioned-namespaces.md
@@ -51,13 +51,13 @@ An approach that addresses these issues is to prefix the namespace ID with versi
 | Namespace Version | 1 | the version of the namespace ID |
 | Namespace ID | 8 if Namespace Version=0, 32 if Namespace Version=1 | namespace ID of the share |

-For example, consider the scenario where at mainnet launch blobs are layed out according to the existing non-interactive default rules. In this scenario, blobs always start at an index aligned with the `BlobMinSquareSize`. The only supported namespace ID is `0`. At some point in the future, if we introduce new non-interactive default rules (e.g. [celestia-app#1161](https://github.com/celestiaorg/celestia-app/pull/1161)), we may also expand the range of available namespaces to include namespaces that start with a leading `0` or `1` byte. Users may opt in to using the new non-interactive default rules by submitting PFB transactions with a namespace ID version of `1`.
+For example, consider the scenario where at mainnet launch blobs are laid out according to the existing non-interactive default rules. In this scenario, blobs always start at an index aligned with the `BlobMinSquareSize`. The only supported namespace ID is `0`. At some point in the future, if we introduce new non-interactive default rules (e.g. [celestia-app#1161](https://github.com/celestiaorg/celestia-app/pull/1161)), we may also expand the range of available namespaces to include namespaces that start with a leading `0` or `1` byte. Users may opt in to using the new non-interactive default rules by submitting PFB transactions with a namespace ID version of `1`.

 - When the namespace starts with `0`, all blobs in the namespace conform to the previous set of non-interactive default rules.
 - When a namespace starts with `1`, all blobs in the namespace conform to the new set of non-interactive default rules.
 ```go
-optionA := []byte{
+optional := []byte{
 	0, // namespace version
 	1, 2, 3, 4, 5, 6, 7, 8, // namespace ID
 	1, // info byte (sequence start indicator = true)
@@ -159,7 +159,7 @@ When a user creates a PFB, concatenate the namespace version with the namespace
    1. Option 1: when there are changes to the universal share prefix
    2. Option 2: when there are changes to any part of the remaining data in a share
 3. When do we expect to increment the namespace version?
-   1. During a backwards incompatable non-interactive default rule change
+   1. During a backwards incompatible non-interactive default rule change
    2. If we change the format of a padding share (e.g. a namespace padding share) instead of `0` bytes, pad with something else like. We may need to preserve backwards compatibility for padding shares that use old namespaces. Note this scenario likely implies a namespace version and share version increase.
    3. Change the format of PFB tx serialization. This scenario likely implies duplicating the PFB txs in a data square, one with the old namespace version and one with the new namespace version.
 4. Inspired by [type-length-value](https://en.wikipedia.org/wiki/Type%E2%80%93length%E2%80%93value), should we consider prefixing optional fields (sequence length and reserved bytes) with a type and a length? This would enable us to modify those fields without introducing new share versions.
diff --git a/docs/architecture/adr-015-namespace-id-size.md b/docs/architecture/adr-015-namespace-id-size.md
index 5e0827acee..8801de29da 100644
--- a/docs/architecture/adr-015-namespace-id-size.md
+++ b/docs/architecture/adr-015-namespace-id-size.md
@@ -46,7 +46,7 @@ Users will specify a version (1 byte) and a ID (28 bytes) in their PFB. Addition
 ## Desirable criteria

 1. A user should be able to randomly generate a namespace that hasn't been used before[^1]
-2. There should exist a large enough namespace ID space for all rollups that may exist in the forseeable future (e.g. 100 years)
+2. There should exist a large enough namespace ID space for all rollups that may exist in the foreseeable future (e.g. 100 years)

 ### Criteria 1
@@ -70,7 +70,7 @@ Namespace ID size (bytes) | 1 billion (10^9) | 1 trillion (10^12) | 1 quadrillio
 > As a rule of thumb, a hash function with range of size N can hash on the order of sqrt(N) values before running into collisions.[^4]

-Namespace ID size (bytes) | Hash funciton range | Can hash this many items before running into collision
+Namespace ID size (bytes) | Hash function range | Can hash this many items before running into collision
 --------------------------|---------------------|-------------------------------------------------------
 8 | 2^64 | 2^32 = ~4 billion items
 16 | 2^128 | 2^64 = ~1 quintillion items
@@ -141,7 +141,7 @@ Another tradeoff to consider is the size of the namespace in the share. Since a
 ### Maximum blob size

-If the namespace size is increased, the maximum possible blob will decrease. Given the maximum possible blob is bounded by the number of bytes available for blob space in a data square, if a 32 byte namespace size is adopted, the maxmimum blob size will decrease by an upper bound of `appconsts.MaxSquareSize * appconsts.MaxSquareSize * (32-8)`. Note this is an upper bound because not all shares in the data square can be used for blob data (i.e. at least one share must contain the associated PayForBlob transaction).
+If the namespace size is increased, the maximum possible blob will decrease. Given the maximum possible blob is bounded by the number of bytes available for blob space in a data square, if a 32 byte namespace size is adopted, the maximum blob size will decrease by an upper bound of `appconsts.MaxSquareSize * appconsts.MaxSquareSize * (32-8)`. Note this is an upper bound because not all shares in the data square can be used for blob data (i.e. at least one share must contain the associated PayForBlob transaction).

 ### SHA256 performance
diff --git a/docs/architecture/adr-018-network-upgrades.md b/docs/architecture/adr-018-network-upgrades.md
index f9b0d84682..bdba2ba244 100644
--- a/docs/architecture/adr-018-network-upgrades.md
+++ b/docs/architecture/adr-018-network-upgrades.md
@@ -47,7 +47,7 @@ Given this, a node can at any time spin up a v2 binary which will immediately be
 The height of the upgrades will initially be hard coded into the binary. This will consist of a mapping from chain ID to app version to a range of heights that will be loaded by the application into working memory whenever the node begins and supplied directly to the `upgrades` module which will be responsible for scheduling. The chainID is required as we expect the same binary to be used across testnets and mainnet. There are a few considerations that shape how this system will work:

 - Upgrading needs to support state migrations. These must happen to all nodes at the same moment between heights. Ideally all migrations that affect state would correspond at the height of the new app version i.e. after `Commit` and before processing of the transactions at that height. `BeginBlock` seems like an ideal area to perform these upgrades however these might affect the way that `PrepareProposal` and `ProcessProposal` is conducted thus they must be performed even prior to these ABCI calls. A simpler implementation would have been for the proposer to immediately propose a block with the next version i.e. v2. However that would require the proposer to first migrate state (taking an unknown length of time) and for the validators receiving that proposal to first migrate before validating and given that the upgrade is not certain, there would need to be a mechanism to migrate back to v1 (NOTE: this remains the case if we wish to support downgrading which is discussed later). To overcome these requirements, the proposer must signal in the prior height the intention to upgrade to a new version. This is done with a new message type, `MsgVersionChange`, which must be put as the first transaction in the block. Validators read this and if they are in agreement to supporting the version change they vote on the block accordingly. If the block reaches consensus then all validators will update the app version at `EndBlock`. CometBFT will then propose the next block using that version. Nodes that have not upgraded and don't support the binary will error and exit. Given that the previous block was approved by more than 2/3 of the network we have a strong guarantee that this block will be accepted by the network. However, it's worth noting that given a security model that must withstand 1/3 byzantine nodes, even a single byzantine node that voted for the upgrade yet doesn't vote for the following block can stall the network until > 2/3 nodes upgrade and vote on the following block.
-- Given uncertainty in scheduling, the system must be able to handle changes to the upgrade height that most commonly would come in the form of delays. Embedding the upgrade schedule in the binary is convenient for node operators and avoids the possibility for user errors. However, binaries are static. If the community wished to push back the upgrade by two weeks there is the possibility that some nodes would not rerun the new binary thus we'd get a split between nodes running the old schedule and nodes running the new schedule. To overcome this, proposers will only propose a version change in the first round of each height, thus allowing transactions to still be committed even under circumstances where there is no consensus on upgrading. Secondly, we define a range in which nodes will attempt to upgrade the app version and failing this will continue to run the current version. Lastly, the binary will have the ability to manually specify the app version height mapping and overide the built-in values either through a flag or in the `app.toml` config. This is expected to be used in testing and in emergency situations only. Another example to keep in mind is if a quorum outright rejects an upgrade. If some of the validators are for the change they should have some way to continue participating in the network. Therefore we employ a range that nodes will attempt to upgrade and afterwards will continue on normally with the new binary however running the older version.
+- Given uncertainty in scheduling, the system must be able to handle changes to the upgrade height that most commonly would come in the form of delays. Embedding the upgrade schedule in the binary is convenient for node operators and avoids the possibility for user errors. However, binaries are static. If the community wished to push back the upgrade by two weeks there is the possibility that some nodes would not rerun the new binary thus we'd get a split between nodes running the old schedule and nodes running the new schedule. To overcome this, proposers will only propose a version change in the first round of each height, thus allowing transactions to still be committed even under circumstances where there is no consensus on upgrading. Secondly, we define a range in which nodes will attempt to upgrade the app version and failing this will continue to run the current version. Lastly, the binary will have the ability to manually specify the app version height mapping and override the built-in values either through a flag or in the `app.toml` config. This is expected to be used in testing and in emergency situations only. Another example to keep in mind is if a quorum outright rejects an upgrade. If some of the validators are for the change they should have some way to continue participating in the network. Therefore we employ a range that nodes will attempt to upgrade and afterwards will continue on normally with the new binary however running the older version.
 - The system needs to be tolerant of unexpected faults in the upgrade process. This can be:
   - The community/contributors realise there is a bug in the new version after the binary has been released. Node operators will need to downgrade back to the previous version and restart their node.
   - There is a halting bug in the migration or in processing of the first transactions. This most likely would be in the form of an apphash mismatch. This becomes more problematic with delayed execution as the block (with v2 transactions) has already been committed. Immediate execution has the advantage of the apphash mismatch being realised before the data is committed. It's still however feasible to over come this but it involves nodes rolling back the previous state and re-exectuing the transactions using the v1 state machine (which will skip over the v2 transactions). This means node operators should be able to manually override the app version that the proposer will propose with. Lastly, if state migrations occurred between v2 and v1, a reverse migration would need to be performed which would make things especially difficult. If we are unable to fallback to the previous version and continue then the other option is to remain halted until the bug is patched and the network can update and continue
diff --git a/docs/architecture/adr-019-strict-inflation-schedule.md b/docs/architecture/adr-019-strict-inflation-schedule.md
index 257128da88..8102ad56aa 100644
--- a/docs/architecture/adr-019-strict-inflation-schedule.md
+++ b/docs/architecture/adr-019-strict-inflation-schedule.md
@@ -23,7 +23,7 @@ In contrast to a flexible inflation rate, Celestia intends on having a predictab
 | Target inflation | 1.50 | When the target inflation is reached, it remains at that rate. |

-The table below depicts the inflation rate for the forseeable future:
+The table below depicts the inflation rate for the foreseeable future:

 | Year | Inflation (%) |
 |------|-------------------|
diff --git a/docs/architecture/adr-020-deterministic-square-construction.md b/docs/architecture/adr-020-deterministic-square-construction.md
index b5e74a6090..6fd2493961 100644
--- a/docs/architecture/adr-020-deterministic-square-construction.md
+++ b/docs/architecture/adr-020-deterministic-square-construction.md
@@ -13,7 +13,7 @@ Implemented in
 The current protocol around the construction of an original data square (ODS) is based around a set of constraints that are enforced during consensus through validation (See `ProcessProposal`). Block proposers are at liberty to choosing not only what transactions are included and in what order but can effectively decide on the amount of padding (i.e. where each blob is located in the square) and the size of the square. This degree of control leaks needless complexity to users with little upside and allows for adverse behaviour.

-Earlier designs were incorporated around the notion of interaction between the block proposer and the transaction submitter. A user that wanted to submit a PFB would go to a potential block proposer, provide them with the transaction, the proposer would then reserve a position in the square for the transaction and finally the transaction submitter would sign the transaction with the provided share index. However, Celestia may have 100 potential block proposers which are often hidden from the network. Furthermore, tranasctions often reach a block proposer through a gossip network, severing the ability for the block proposer to directly communicate with the transaction submitter. Lastly, new transactions with greater fees might arrive causing the block proposer to want to shuffle the transactions around in the square. The response to these problems was to come up with "non-interactive defaults" (first mentioned in [ADR006](./adr-006-non-interactive-defaults.md)).
+Earlier designs were incorporated around the notion of interaction between the block proposer and the transaction submitter. A user that wanted to submit a PFB would go to a potential block proposer, provide them with the transaction, the proposer would then reserve a position in the square for the transaction and finally the transaction submitter would sign the transaction with the provided share index. However, Celestia may have 100 potential block proposers which are often hidden from the network. Furthermore, transactions often reach a block proposer through a gossip network, severing the ability for the block proposer to directly communicate with the transaction submitter. Lastly, new transactions with greater fees might arrive causing the block proposer to want to shuffle the transactions around in the square. The response to these problems was to come up with "non-interactive defaults" (first mentioned in [ADR006](./adr-006-non-interactive-defaults.md)).

 ## Decision
@@ -25,7 +25,7 @@ Square construction is thus to be reduced to the simple deterministic function:
 func ConstructSquare(txs []Tx) []Share
 ```

-and it's couterpart
+and its counterpart

 ```go
 func DeconstructSquare(shares []Share) []Tx
diff --git a/pkg/appconsts/global_consts.go b/pkg/appconsts/global_consts.go
index 19c83eb2cf..7ca564a0fe 100644
--- a/pkg/appconsts/global_consts.go
+++ b/pkg/appconsts/global_consts.go
@@ -29,7 +29,7 @@ const (
 	ShareSize = 512

 	// ShareInfoBytes is the number of bytes reserved for information. The info
-	// byte contains the share version and a sequence start idicator.
+	// byte contains the share version and a sequence start indicator.
 	ShareInfoBytes = 1

 	// SequenceLenBytes is the number of bytes reserved for the sequence length
@@ -78,7 +78,7 @@ const (
 )

 var (
-	// DataCommitmentBlocksLimit is the maximnum number of blocks that a data commitment can span
+	// DataCommitmentBlocksLimit is the maximum number of blocks that a data commitment can span
 	DataCommitmentBlocksLimit = consts.DataCommitmentBlocksLimit

 	// NewBaseHashFunc is the base hash function used by NMT. Change accordingly
diff --git a/pkg/inclusion/nmt_caching.go b/pkg/inclusion/nmt_caching.go
index 82958c08e6..18aed44cfb 100644
--- a/pkg/inclusion/nmt_caching.go
+++ b/pkg/inclusion/nmt_caching.go
@@ -30,7 +30,7 @@ func newSubTreeRootCacher() *subTreeRootCacher {
 	return &subTreeRootCacher{cache: make(map[string][2]string)}
 }

-// Visit fullfills the nmt.NodeVisitorFn function definition. It stores each inner
+// Visit fulfills the nmt.NodeVisitorFn function definition. It stores each inner
 // node in a simple map, which can later be used to walk the tree. This function
 // is called by the nmt when calculating the root.
 func (strc *subTreeRootCacher) Visit(hash []byte, children ...[]byte) {
@@ -57,7 +57,7 @@ func (strc subTreeRootCacher) walk(root []byte, path []WalkInstruction) ([]byte,
 	// try to lookup the provided sub root
 	children, has := strc.cache[string(root)]
 	if !has {
-		// note: we might want to consider panicing here
+		// note: we might want to consider panicking here
 		return nil, fmt.Errorf("did not find sub tree root: %v", root)
 	}

@@ -92,7 +92,7 @@ func NewSubtreeCacher(squareSize uint64) *EDSSubTreeRootCacher {
 	}
 }

-// Constructor fullfills the rsmt2d.TreeCreatorFn by keeping a pointer to the
+// Constructor fulfills the rsmt2d.TreeCreatorFn by keeping a pointer to the
 // cache and embedding it as a nmt.NodeVisitor into a new wrapped nmt.
 func (stc *EDSSubTreeRootCacher) Constructor(axis rsmt2d.Axis, axisIndex uint) rsmt2d.Tree {
 	// see docs of counter field for more
diff --git a/pkg/shares/namespace.go b/pkg/shares/namespace.go
index d5026c575c..e82a5b8dcd 100644
--- a/pkg/shares/namespace.go
+++ b/pkg/shares/namespace.go
@@ -9,7 +9,7 @@ import (
 // GetShareRangeForNamespace returns all shares that belong to a given
 // namespace. It will return an empty range if the namespace could not be
 // found. This assumes that the slice of shares are lexicographically
-// sorted by namespace. Ranges here are always end exlusive.
+// sorted by namespace. Ranges here are always end exclusive.
 func GetShareRangeForNamespace(shares []Share, ns namespace.Namespace) (Range, error) {
 	if len(shares) == 0 {
 		return EmptyRange(), nil
diff --git a/pkg/square/builder.go b/pkg/square/builder.go
index 57ba035544..d7beb9829c 100644
--- a/pkg/square/builder.go
+++ b/pkg/square/builder.go
@@ -379,7 +379,7 @@ func newElement(blob *blob.Blob, pfbIndex, blobIndex, subtreeRootThreshold int)
 		BlobIndex: blobIndex,
 		NumShares: numShares,
 		//
-		// For cacluating the maximum possible padding consider the following tree
+		// For calculating the maximum possible padding consider the following tree
 		// where each leaf corresponds to a share.
 		//
 		// Depth Position
diff --git a/pkg/square/square_test.go b/pkg/square/square_test.go
index b3beb7dcb0..f92da1f966 100644
--- a/pkg/square/square_test.go
+++ b/pkg/square/square_test.go
@@ -35,7 +35,7 @@ func TestSquareConstruction(t *testing.T) {
 	require.NoError(t, err)
 	sendTxs := blobfactory.GenerateManyRawSendTxs(signer, 250)
 	pfbTxs := blobfactory.RandBlobTxs(signer, rand, 10000, 1, 1024)
-	t.Run("normal transactions after PFB trasactions", func(t *testing.T) {
+	t.Run("normal transactions after PFB transactions", func(t *testing.T) {
 		txs := append(sendTxs[:5], append(pfbTxs, sendTxs[5:]...)...)
 		_, err := square.Construct(coretypes.Txs(txs).ToSliceOfBytes(), appconsts.LatestVersion, appconsts.DefaultSquareSizeUpperBound)
 		require.Error(t, err)
diff --git a/pkg/user/signer.go b/pkg/user/signer.go
index a6cbd7a41e..8f08b3f993 100644
--- a/pkg/user/signer.go
+++ b/pkg/user/signer.go
@@ -267,7 +267,7 @@ func (s *Signer) PubKey() cryptotypes.PubKey {
 	return s.pk
 }

-// GetSequencer gets the lastest signed sequnce and increments the local sequence number
+// GetSequencer gets the latest signed sequence and increments the local sequence number
 func (s *Signer) GetSequence() uint64 {
 	s.mtx.Lock()
 	defer s.mtx.Unlock()
diff --git a/proto/celestia/core/v1/da/data_availability_header.pb.go b/proto/celestia/core/v1/da/data_availability_header.pb.go
index 124043cbdf..5ab044f3a9 100644
--- a/proto/celestia/core/v1/da/data_availability_header.pb.go
+++ b/proto/celestia/core/v1/da/data_availability_header.pb.go
@@ -24,7 +24,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 // DataAvailabilityHeader contains the row and column roots of the erasure
 // coded version of the data in Block.Data.
-// Therefor the original Block.Data is arranged in a
+// Therefore the original Block.Data is arranged in a
 // k × k matrix, which is then "extended" to a
 // 2k × 2k matrix applying multiple times Reed-Solomon encoding.
 // For details see Section 5.2: https://arxiv.org/abs/1809.09044
diff --git a/proto/celestia/core/v1/da/data_availability_header.proto b/proto/celestia/core/v1/da/data_availability_header.proto
index aef7c6bbbd..19a4b7d3ce 100644
--- a/proto/celestia/core/v1/da/data_availability_header.proto
+++ b/proto/celestia/core/v1/da/data_availability_header.proto
@@ -5,7 +5,7 @@ option go_package = "github.com/celestiaorg/celestia-app/proto/celestia/core/v1/
 // DataAvailabilityHeader contains the row and column roots of the erasure
 // coded version of the data in Block.Data.
-// Therefor the original Block.Data is arranged in a
+// Therefore the original Block.Data is arranged in a
 // k × k matrix, which is then "extended" to a
 // 2k × 2k matrix applying multiple times Reed-Solomon encoding.
 // For details see Section 5.2: https://arxiv.org/abs/1809.09044
diff --git a/proto/tendermint/crypto/proof.proto b/proto/tendermint/crypto/proof.proto
index 58657a7852..324b5f3d5e 100644
--- a/proto/tendermint/crypto/proof.proto
+++ b/proto/tendermint/crypto/proof.proto
@@ -27,7 +27,7 @@ message DominoOp {
 }

 // ProofOp defines an operation used for calculating Merkle root
-// The data could be arbitrary format, providing nessecary data
+// The data could be arbitrary format, providing necessary data
 // for example neighbouring node hash
 message ProofOp {
   string type = 1;
diff --git a/scripts/build-run-single-node.sh b/scripts/build-run-single-node.sh
index 7af3124053..60d9fd1031 100755
--- a/scripts/build-run-single-node.sh
+++ b/scripts/build-run-single-node.sh
@@ -16,7 +16,7 @@ CHAINID="private"
 coins="1000000000000000utia"
 ${BIN_PATH} init $CHAINID --chain-id $CHAINID --home ${HOME_DIR}
 ${BIN_PATH} keys add validator --keyring-backend="test" --home ${HOME_DIR}
-# this won't work because the some proto types are decalared twice and the logs output to stdout (dependency hell involving iavl)
+# this won't work because some proto types are declared twice and the logs output to stdout (dependency hell involving iavl)
 ${BIN_PATH} add-genesis-account $(${BIN_PATH} keys show validator -a --keyring-backend="test" --home ${HOME_DIR}) $coins --home ${HOME_DIR}
 ${BIN_PATH} gentx validator 5000000000utia \
 	--keyring-backend="test" \
diff --git a/specs/src/specs/block_validity_rules.md b/specs/src/specs/block_validity_rules.md
index 37f1360942..86718b38aa 100644
--- a/specs/src/specs/block_validity_rules.md
+++ b/specs/src/specs/block_validity_rules.md
@@ -35,7 +35,7 @@ availabily by simply downloading the entire block.
 > with Dishonest Majorities"](https://arxiv.org/abs/1809.09044) and in the
 > [`celestia-node`](https://github.com/celestiaorg/celestia-node) repo.

-Celestia specifc validity rules can be categorized into two groups:
+Celestia specific validity rules can be categorized into two groups:

 ### Transaction Validity Rules
diff --git a/specs/src/specs/data_structures.md b/specs/src/specs/data_structures.md
index 7bb36e6095..0315633a63 100644
--- a/specs/src/specs/data_structures.md
+++ b/specs/src/specs/data_structures.md
@@ -335,7 +335,7 @@ Then,
 1. For each of `transactionData`, `intermediateStateRootData`, PayForBlob transactions, [serialize](#serialization):
    1. For each request in the list:
       1. [Serialize](#serialization) the request (individually).
-      1. Compute the length of each serialized request, [serialize the length](#serialization), and pre-pend the serialized request with its serialized length.
+      1. Compute the length of each serialized request, [serialize the length](#serialization), and prepend the serialized request with its serialized length.
       1. Split up the length/request pairs into [`SHARE_SIZE`](./consensus.md#constants)`-`[`NAMESPACE_ID_BYTES`](./consensus.md#constants)`-`[`SHARE_RESERVED_BYTES`](./consensus.md#constants)-byte chunks.
       1. Create a [share](./shares.md) out of each chunk. This data has a _reserved_ namespace ID, so the first [`NAMESPACE_SIZE`](./consensus.md#constants)`+`[`SHARE_RESERVED_BYTES`](./consensus.md#constants) bytes for these shares must be set specially.
 1. Concatenate the lists of shares in the order: transactions, intermediate state roots, PayForBlob transactions.
@@ -347,7 +347,7 @@ These shares are arranged in the [first quadrant](#2d-reed-solomon-encoding-sche
 Each blob in the list `blobData`:

 1. [Serialize](#serialization) the blob (individually).
-1. Compute the length of each serialized blob, [serialize the length](#serialization), and pre-pend the serialized blob with its serialized length.
+1. Compute the length of each serialized blob, [serialize the length](#serialization), and prepend the serialized blob with its serialized length.
 1. Split up the length/blob pairs into [`SHARE_SIZE`](./consensus.md#constants)`-`[`NAMESPACE_SIZE`](./consensus.md#constants)-byte chunks.
 1. Create a [share](./shares.md) out of each chunk. The first [`NAMESPACE_SIZE`](./consensus.md#constants) bytes for these shares is set to the namespace.
diff --git a/specs/src/specs/params.md b/specs/src/specs/params.md
index 2b992e4eff..653182965d 100644
--- a/specs/src/specs/params.md
+++ b/specs/src/specs/params.md
@@ -52,7 +52,7 @@ are blocked by the `x/paramfilter` module.
 | mint.BondDenom | utia | Denomination that is inflated and sent to the distribution module account. | False |
 | mint.DisinflationRate | 0.10 (10%) | The rate at which the inflation rate decreases each year. | False |
 | mint.InitialInflationRate | 0.08 (8%) | The inflation rate the network starts at. | False |
-| mint.TargetInflationRate | 0.015 (1.5%) | The inflation rate that the network aims to stabalize at. | False |
+| mint.TargetInflationRate | 0.015 (1.5%) | The inflation rate that the network aims to stabilize at. | False |
 | slashing.DowntimeJailDuration | 1 min | Duration of time a validator must stay jailed. | True |
 | slashing.MinSignedPerWindow | 0.75 (75%) | The percentage of SignedBlocksWindow that must be signed not to get jailed. | True |
 | slashing.SignedBlocksWindow | 5000 | The range of blocks used to count for downtime. | True |
diff --git a/test/testground/README.md b/test/testground/README.md
index 1b43df1113..70511db826 100644
--- a/test/testground/README.md
+++ b/test/testground/README.md
@@ -1,4 +1,4 @@
-# Testground Experiement Tooling
+# Testground Experiment Tooling

 ## Test Instance Communication and Experiment Flow
@@ -68,7 +68,7 @@ by each node. This allows for arbitrary network topologies to be created.
 ### Standard

 The `standard` test runs an experiment that is as close to mainnet as possible.
-This is used as a base for other experiements.
+This is used as a base for other experiments.

 ## Running the Experiment
diff --git a/test/testground/manifest.toml b/test/testground/manifest.toml
index 110cefbaec..2bef01eca9 100644
--- a/test/testground/manifest.toml
+++ b/test/testground/manifest.toml
@@ -38,7 +38,7 @@ blobs_per_sequence = { type = "int", default = 1 }
 inbound_peer_count = { type = "int", default = 40 }
 outbound_peer_count = { type = "int", default = 10 }
 gov_max_square_size = { type = "int", default = 256 }
-max_block_bytes = { type = "int", deafult = 100000000 }
+max_block_bytes = { type = "int", default = 100000000 }
 mempool = { type = "string", default = "v1" }
 broadcast_txs = { type = "bool", default = true }
 tracing_nodes = { type = "int", default = 0 }
diff --git a/test/testground/network/configurators.go b/test/testground/network/configurators.go
index 7b7e66a1ea..3444e5796f 100644
--- a/test/testground/network/configurators.go
+++ b/test/testground/network/configurators.go
@@ -47,7 +47,7 @@ func GetConfigurators(runenv *runtime.RunEnv) ([]Configurator, error) {
 	return ops, nil
 }

-// Configurator is a function that arbitarily modifies the provided node
+// Configurator is a function that arbitrarily modifies the provided node
 // configurations. It is used to generate the topology (which nodes are
 // connected to which) of the network, along with making other arbitrary changes
 // to the configs.
diff --git a/test/testground/network/consensus_node.go b/test/testground/network/consensus_node.go
index 8e6adc1802..662fb604c5 100644
--- a/test/testground/network/consensus_node.go
+++ b/test/testground/network/consensus_node.go
@@ -35,7 +35,7 @@ import (
 // ConsensusNode is the node type used by testground instances to run a
 // celestia-app full node. It can optionally be configured to be a validator,
-// and has methods to boostrap a network, initialize itself, start, and stop.
+// and has methods to bootstrap a network, initialize itself, start, and stop.
 type ConsensusNode struct {
 	Name string
 	// NetworkKey is the key used for signing gossiped messages.
diff --git a/test/txsim/send.go b/test/txsim/send.go
index a294fb29bc..f3145020de 100644
--- a/test/txsim/send.go
+++ b/test/txsim/send.go
@@ -53,7 +53,7 @@ func (s *SendSequence) Init(_ context.Context, _ grpc.ClientConn, allocateAccoun
 	s.accounts = allocateAccounts(s.numAccounts, amount)
 }

-// Next sumbits a transaction to remove funds from one account to the next
+// Next submits a transaction to remove funds from one account to the next
 func (s *SendSequence) Next(_ context.Context, _ grpc.ClientConn, rand *rand.Rand) (Operation, error) {
 	if s.index >= s.numIterations {
 		return Operation{}, ErrEndOfSequence
diff --git a/test/util/malicious/hasher.go b/test/util/malicious/hasher.go
index 4f023bf096..3022750476 100644
--- a/test/util/malicious/hasher.go
+++ b/test/util/malicious/hasher.go
@@ -233,10 +233,10 @@ func (n *NmtHasher) ValidateNodeFormat(node []byte) (err error) {
 // check that two siblings are ordered with respect to their namespaces.
 func (n *NmtHasher) validateSiblingsNamespaceOrder(left, right []byte) (err error) {
 	if err := n.ValidateNodeFormat(left); err != nil {
-		return fmt.Errorf("%w: left node does not match the namesapce hash format", err)
+		return fmt.Errorf("%w: left node does not match the namespace hash format", err)
 	}
 	if err := n.ValidateNodeFormat(right); err != nil {
-		return fmt.Errorf("%w: right node does not match the namesapce hash format", err)
+		return fmt.Errorf("%w: right node does not match the namespace hash format", err)
 	}
 	return nil
 }
diff --git a/test/util/malicious/test_app.go b/test/util/malicious/test_app.go
index c5c00ae135..4e83dcfba3 100644
--- a/test/util/malicious/test_app.go
+++ b/test/util/malicious/test_app.go
@@ -32,7 +32,7 @@ func OutOfOrderNamespaceConfig(startHeight int64) *testnode.Config {
 	return TestNodeConfig(bcfg)
 }

-// TestNodeConfig returns a testnode config with the malicous application and
+// TestNodeConfig returns a testnode config with the malicious application and
 // provided behavior set in the app options.
 func TestNodeConfig(behavior BehaviorConfig) *testnode.Config {
 	cfg := testnode.DefaultConfig().
diff --git a/x/blob/README.md b/x/blob/README.md
index a1646dc4c3..5c32f1214b 100644
--- a/x/blob/README.md
+++ b/x/blob/README.md
@@ -142,7 +142,7 @@ each PFB, to be included in a block must follow a set of validity rules.
    field of the blob transaction `BlobTx`.
 1. Namespace Validity: The namespace of each blob in a blob transaction
    `BlobTx` must be valid. This validity is determined by the following sub-rules:
-   1. The namepsace of each blob must match the respective (same index)
+   1. The namespace of each blob must match the respective (same index)
      namespace in the `MsgPayForBlobs` `sdk.Msg` field `namespaces`.
   1. The namespace is not reserved for protocol use.
 1. Blob Size: No blob can have a size of 0.
@@ -203,7 +203,7 @@ celestia-app tx blob PayForBlobs [fla
 ```

 For submitting PFB transaction via a light client's rpc, see [celestia-node's
-documention](https://docs.celestia.org/developers/node-tutorial#submitting-data).
+documentation](https://docs.celestia.org/developers/node-tutorial#submitting-data).

 The steps in the
 [`SubmitPayForBlobs`](https://github.com/celestiaorg/celestia-app/blob/v1.0.0-rc2/x/blob/payforblob.go#L15-L54)
diff --git a/x/blob/types/payforblob.go b/x/blob/types/payforblob.go
index edd971f207..153a69691b 100644
--- a/x/blob/types/payforblob.go
+++ b/x/blob/types/payforblob.go
@@ -152,7 +152,7 @@ func (msg *MsgPayForBlobs) Gas(gasPerByte uint32) uint64 {
 }

 // GasToConsume works out the extra gas charged to pay for a set of blobs in a PFB.
-// Note that tranasctions will incur other gas costs, such as the signature verification
+// Note that transactions will incur other gas costs, such as the signature verification
 // and reads to the user's account.
 func GasToConsume(blobSizes []uint32, gasPerByte uint32) uint64 {
 	var totalSharesUsed uint64
diff --git a/x/blobstream/README.md b/x/blobstream/README.md
index f040e3a0ba..41e65faa94 100644
--- a/x/blobstream/README.md
+++ b/x/blobstream/README.md
@@ -192,7 +192,7 @@ A new valset is generated by the valset [`handler`](https://github.com/celestiao
 #### No valset in store

-When `EndBlock` is executed straight after genesis, the store doesn't have any valset created yet. So, it [checks](https://github.com/celestiaorg/celestia-app/blob/9bf0cf1dd9ce31a3fecb51310c3913820b21a8c2/x/qgb/abci.go#L85-L93) wether there is any valset in store, and [generates](https://github.com/celestiaorg/celestia-app/blob/9bf0cf1dd9ce31a3fecb51310c3913820b21a8c2/x/qgb/abci.go#L123-L134) a new one afterwards representing the initial validator set.
+When `EndBlock` is executed straight after genesis, the store doesn't have any valset created yet. So, it [checks](https://github.com/celestiaorg/celestia-app/blob/9bf0cf1dd9ce31a3fecb51310c3913820b21a8c2/x/qgb/abci.go#L85-L93) whether there is any valset in store, and [generates](https://github.com/celestiaorg/celestia-app/blob/9bf0cf1dd9ce31a3fecb51310c3913820b21a8c2/x/qgb/abci.go#L123-L134) a new one afterwards representing the initial validator set.

 #### Validator starts unbonding
diff --git a/x/mint/types/constants.go b/x/mint/types/constants.go
index 06a7cffbd3..832679bcd8 100644
--- a/x/mint/types/constants.go
+++ b/x/mint/types/constants.go
@@ -19,7 +19,7 @@ const (
 	// DisinflationRate is the rate at which the inflation rate decreases each year.
 	DisinflationRate = 0.1
 	// TargetInflationRate is the inflation rate that the network aims to
-	// stabalize at. In practice, TargetInflationRate acts as a minimum so that
+	// stabilize at. In practice, TargetInflationRate acts as a minimum so that
 	// the inflation rate doesn't decrease after reaching it.
 	TargetInflationRate = 0.015
 )

From 3dd60f506c609a3af6764b6eaa4bec19139ed679 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C4=90=E1=BB=97=20Vi=E1=BB=87t=20Ho=C3=A0ng?=
Date: Tue, 12 Dec 2023 22:52:11 +0700
Subject: [PATCH 3/4] Update pkg/user/signer.go

wow, this is cool

Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
---
 pkg/user/signer.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/user/signer.go b/pkg/user/signer.go
index 8f08b3f993..bcb67b83db 100644
--- a/pkg/user/signer.go
+++ b/pkg/user/signer.go
@@ -267,7 +267,7 @@ func (s *Signer) PubKey() cryptotypes.PubKey {
 	return s.pk
 }

-// GetSequencer gets the latest signed sequence and increments the local sequence number
+// GetSequence gets the latest signed sequence and increments the local sequence number
 func (s *Signer) GetSequence() uint64 {
 	s.mtx.Lock()
 	defer s.mtx.Unlock()

From 5709858370bad225b27f926c7c433c05de7d1371 Mon Sep 17 00:00:00 2001
From: hoangdv2429
Date: Tue, 12 Dec 2023 23:01:59 +0700
Subject: [PATCH 4/4] revert

---
 docs/architecture/adr-014-versioned-namespaces.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/architecture/adr-014-versioned-namespaces.md b/docs/architecture/adr-014-versioned-namespaces.md
index ca9dba8d88..af52c0241b 100644
--- a/docs/architecture/adr-014-versioned-namespaces.md
+++ b/docs/architecture/adr-014-versioned-namespaces.md
@@ -57,7 +57,7 @@ For example, consider the scenario where at mainnet launch blobs are laid out ac
 - When the namespace starts with `0`, all blobs in the namespace conform to the previous set of non-interactive default rules.
 - When a namespace starts with `1`, all blobs in the namespace conform to the new set of non-interactive default rules.
 ```go
-optional := []byte{
+optionA := []byte{
 	0, // namespace version
 	1, 2, 3, 4, 5, 6, 7, 8, // namespace ID
 	1, // info byte (sequence start indicator = true)
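
For reviewers tracing the `optionA` layout that patch 4 restores, the sketch below parses the share prefix bytes shown in the hunk above. It is a minimal, hypothetical illustration, not celestia-app code: it assumes the version-0 layout from the quoted ADR-014 example (1 version byte, 8 namespace ID bytes, 1 info byte) and the info-byte packing described in the quoted `appconsts` comment (share version plus a sequence start indicator, assumed here to be 7 bits and 1 bit respectively). The `sharePrefix` and `parseSharePrefix` names are invented for this sketch.

```go
package main

import "fmt"

// sharePrefix is a hypothetical view of the bytes in the ADR-014 example:
// 1 namespace version byte, 8 namespace ID bytes, 1 info byte.
type sharePrefix struct {
	NamespaceVersion uint8
	NamespaceID      [8]byte
	ShareVersion     uint8 // assumed upper 7 bits of the info byte
	SequenceStart    bool  // assumed low bit of the info byte
}

// parseSharePrefix splits the first 10 bytes of a share according to the
// version-0 layout above. A sketch only, not the celestia-app parser.
func parseSharePrefix(b []byte) (sharePrefix, error) {
	if len(b) < 10 {
		return sharePrefix{}, fmt.Errorf("share too short: %d bytes", len(b))
	}
	var p sharePrefix
	p.NamespaceVersion = b[0]
	copy(p.NamespaceID[:], b[1:9])
	info := b[9]
	p.ShareVersion = info >> 1
	p.SequenceStart = info&1 == 1
	return p, nil
}

func main() {
	// The exact bytes from the hunk above.
	optionA := []byte{
		0,                      // namespace version
		1, 2, 3, 4, 5, 6, 7, 8, // namespace ID
		1, // info byte (sequence start indicator = true)
	}
	p, err := parseSharePrefix(optionA)
	if err != nil {
		panic(err)
	}
	fmt.Printf("version=%d id=%v sequenceStart=%v\n",
		p.NamespaceVersion, p.NamespaceID, p.SequenceStart)
}
```

Under these assumptions the program prints `version=0 id=[1 2 3 4 5 6 7 8] sequenceStart=true`, matching the comments in the ADR-014 example.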