2 changes: 1 addition & 1 deletion core/rawdb/accessors_chain_test.go
@@ -228,7 +228,7 @@ func TestBadBlockStorage(t *testing.T) {
t.Fatalf("Failed to load all bad blocks")
}

- // Write a bunch of bad blocks, all the blocks are should sorted
+ // Write a bunch of bad blocks, all the blocks should be sorted
// in reverse order. The extra blocks should be truncated.
for _, n := range rand.Perm(100) {
block := types.NewBlockWithHeader(&types.Header{
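For context, a condensed sketch of the invariant the fixed comment describes, written against the geth rawdb/types APIs and assuming the test's `db` handle from the surrounding code; it is not the full test body:

```go
// Blocks written in random order must come back newest-first, with any
// surplus beyond the retention cap truncated (cap behavior assumed from
// the comment above).
badBlocks := rawdb.ReadAllBadBlocks(db)
for i := 0; i+1 < len(badBlocks); i++ {
	if badBlocks[i].NumberU64() < badBlocks[i+1].NumberU64() {
		t.Fatalf("bad blocks not sorted in reverse order")
	}
}
```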
2 changes: 1 addition & 1 deletion core/rawdb/freezer_table_test.go
@@ -894,7 +894,7 @@ func getChunk(size int, b int) []byte {
}

// TODO (?)
- // - test that if we remove several head-files, aswell as data last data-file,
+ // - test that if we remove several head-files, as well as the last data-file,
// the index is truncated accordingly
// Right now, the freezer would fail on these conditions:
// 1. have data files d0, d1, d2, d3
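For context on the TODO: each freezer table is backed by numbered data files plus a flat index file of fixed-size records, so truncating trailing index records is how the table stays consistent when trailing data files disappear. A rough sketch of that record layout; the field names and 6-byte encoding are assumptions based on the surrounding package:

```go
// indexEntry sketches one fixed-size record in a freezer table's index
// file: which data file an item ends in, and at what offset.
type indexEntry struct {
	filenum uint16 // data file number (d0, d1, ...)
	offset  uint32 // end offset of the item inside that file
}

// Truncating the index drops whole trailing records, keeping it aligned
// with whatever data files remain.
const indexEntrySize = 6 // bytes per record
```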
2 changes: 1 addition & 1 deletion core/rawdb/freezer_test.go
@@ -275,7 +275,7 @@ func TestFreezerReadonlyValidate(t *testing.T) {
}
require.NoError(t, f.Close())

- // Re-openening as readonly should fail when validating
+ // Re-opening as readonly should fail when validating
// table lengths.
_, err = NewFreezer(dir, "", true, 2049, tables)
if err == nil {
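The scenario this hunk exercises, condensed: writable opens skip length validation, readonly opens run it. A sketch assuming the NewFreezer signature shown in the diff (dir, namespace, readonly, maxTableSize, tables):

```go
// Open writable and leave the tables with unequal lengths; a writable
// freezer tolerates the mismatch (assumption, per the test's setup).
f, err := NewFreezer(dir, "", false, 2049, tables)
require.NoError(t, err)
// ... write a different number of items into two of the tables ...
require.NoError(t, f.Close())

// Re-opening as readonly validates table lengths and must fail.
if _, err = NewFreezer(dir, "", true, 2049, tables); err == nil {
	t.Fatal("readonly open should fail on mismatched table lengths")
}
```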
2 changes: 1 addition & 1 deletion core/state/pruner/pruner.go
@@ -121,7 +121,7 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta
// the trie nodes(and codes) belong to the active state will be filtered
// out. A very small part of stale tries will also be filtered because of
// the false-positive rate of bloom filter. But the assumption is held here
- // that the false-positive is low enough(~0.05%). The probablity of the
+ // that the false-positive is low enough(~0.05%). The probability of the
// dangling node is the state root is super low. So the dangling nodes in
// theory will never ever be visited again.
var (
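To make the probability claim in this hunk concrete: a bloom false-positive during pruning means a stale node wrongly looks active and survives. A back-of-the-envelope sketch, with an illustrative node count:

```go
package main

import "fmt"

func main() {
	const fpRate = 0.0005            // ~0.05%, per the comment above
	const staleNodes = 100_000_000.0 // illustrative stale-node count
	// Each stale node survives pruning independently with probability
	// fpRate, so the expected number of lingering nodes stays tiny.
	fmt.Printf("expected surviving stale nodes: %.0f\n", staleNodes*fpRate) // 50000
}
```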
2 changes: 1 addition & 1 deletion core/state/snapshot/difflayer.go
@@ -43,7 +43,7 @@ var (
aggregatorMemoryLimit = uint64(4 * 1024 * 1024)

// aggregatorItemLimit is an approximate number of items that will end up
- // in the agregator layer before it's flushed out to disk. A plain account
+ // in the aggregator layer before it's flushed out to disk. A plain account
// weighs around 14B (+hash), a storage slot 32B (+hash), a deleted slot
// 0B (+hash). Slots are mostly set/unset in lockstep, so that average at
// 16B (+hash). All in all, the average entry seems to be 15+32=47B. Use a
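Working out the arithmetic this comment carries: at roughly 47B per average entry, the 4MiB aggregator budget corresponds to on the order of 90k items. The divisor the source actually uses may include extra slack; this sketch just follows the comment's own numbers:

```go
package main

import "fmt"

func main() {
	aggregatorMemoryLimit := uint64(4 * 1024 * 1024) // from the hunk above
	avgEntrySize := uint64(47)                       // 15B payload + 32B hash, per the comment
	fmt.Println(aggregatorMemoryLimit / avgEntrySize) // 89240
}
```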