Fix common misspellings
Bug: chromium:750830
Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_chromium_rel_ng;master.tryserver.v8:v8_linux_noi18n_rel_ng
Change-Id: Icab7b5a1c469d5e77d04df8bfca8319784e92af4
Reviewed-on: https://chromium-review.googlesource.com/595655
Commit-Queue: Julien Brianceau <jbriance@cisco.com>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Daniel Ehrenberg <littledan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47072}
jbrianceau authored and Commit Bot committed Aug 2, 2017
1 parent 31173f9 commit b41f857
Showing 140 changed files with 204 additions and 204 deletions.
2 changes: 1 addition & 1 deletion benchmarks/deltablue.js
@@ -790,7 +790,7 @@ Plan.prototype.execute = function () {
 * In case 1, the added constraint is stronger than the stay
 * constraint and values must propagate down the entire length of the
 * chain. In case 2, the added constraint is weaker than the stay
-* constraint so it cannot be accomodated. The cost in this case is,
+* constraint so it cannot be accommodated. The cost in this case is,
 * of course, very low. Typical situations lie somewhere between these
 * two extremes.
 */
2 changes: 1 addition & 1 deletion gypfiles/landmine_utils.py
@@ -76,7 +76,7 @@ def distributor():
 @memoize()
 def platform():
 """
-Returns a string representing the platform this build is targetted for.
+Returns a string representing the platform this build is targeted for.
 Possible values: 'win', 'mac', 'linux', 'ios', 'android'
 """
 if 'OS' in gyp_defines():
4 changes: 2 additions & 2 deletions include/v8-profiler.h
@@ -389,7 +389,7 @@ class V8_EXPORT HeapGraphNode {
 kRegExp = 6, // RegExp.
 kHeapNumber = 7, // Number stored in the heap.
 kNative = 8, // Native object (not from V8 heap).
-kSynthetic = 9, // Synthetic object, usualy used for grouping
+kSynthetic = 9, // Synthetic object, usually used for grouping
 // snapshot items together.
 kConsString = 10, // Concatenated string. A pair of pointers to strings.
 kSlicedString = 11, // Sliced string. A fragment of another string.
@@ -784,7 +784,7 @@ class V8_EXPORT HeapProfiler {
 /**
 * Returns the sampled profile of allocations allocated (and still live) since
 * StartSamplingHeapProfiler was called. The ownership of the pointer is
-* transfered to the caller. Returns nullptr if sampling heap profiler is not
+* transferred to the caller. Returns nullptr if sampling heap profiler is not
 * active.
 */
 AllocationProfile* GetAllocationProfile();
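
For context on the API documented in the hunk above, a minimal usage sketch of the sampling heap profiler; the isolate and the workload are placeholders, and only the profiler calls come from v8-profiler.h.

```cpp
// Sketch only: the profiler calls are the v8-profiler.h API; the isolate
// setup and the workload in the middle are placeholders.
#include <memory>

#include "include/v8-profiler.h"
#include "include/v8.h"

void DumpLiveAllocations(v8::Isolate* isolate) {
  v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
  profiler->StartSamplingHeapProfiler();
  // ... run the workload to be profiled ...
  // Ownership of the returned profile is transferred to the caller, so take
  // ownership immediately; nullptr means the sampling profiler is not active.
  std::unique_ptr<v8::AllocationProfile> profile(
      profiler->GetAllocationProfile());
  if (profile != nullptr) {
    v8::AllocationProfile::Node* root = profile->GetRootNode();
    (void)root;  // walk the tree of sampled allocation sites here
  }
  profiler->StopSamplingHeapProfiler();
}
```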
4 changes: 2 additions & 2 deletions include/v8.h
@@ -6538,7 +6538,7 @@ struct JitCodeEvent {
 struct line_info_t {
 // PC offset
 size_t offset;
-// Code postion
+// Code position
 size_t pos;
 // The position type.
 PositionType position_type;
@@ -7746,7 +7746,7 @@ typedef bool (*EntropySource)(unsigned char* buffer, size_t length);
 * ReturnAddressLocationResolver is used as a callback function when v8 is
 * resolving the location of a return address on the stack. Profilers that
 * change the return address on the stack can use this to resolve the stack
-* location to whereever the profiler stashed the original return address.
+* location to wherever the profiler stashed the original return address.
 *
 * \param return_addr_location A location on stack where a machine
 * return address resides.
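
The second hunk above documents a profiler callback; the sketch below shows how one might be installed. The stash table is a hypothetical profiler structure, and only the callback signature and v8::V8::SetReturnAddressLocationResolver come from v8.h.

```cpp
// Sketch only: the stash table is a hypothetical profiler structure; the
// callback signature and registration call are the v8.h API shown above.
#include <cstdint>
#include <unordered_map>

#include "include/v8.h"

// Hypothetical map from a stack slot the profiler patched to the place where
// it stashed the original return address.
static std::unordered_map<uintptr_t, uintptr_t> g_stashed_return_addresses;

static uintptr_t ResolveReturnAddress(uintptr_t return_addr_location) {
  auto it = g_stashed_return_addresses.find(return_addr_location);
  // If the profiler patched this slot, hand V8 the stashed original instead.
  return it != g_stashed_return_addresses.end() ? it->second
                                                : return_addr_location;
}

void InstallReturnAddressResolver() {
  v8::V8::SetReturnAddressLocationResolver(&ResolveReturnAddress);
}
```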
2 changes: 1 addition & 1 deletion samples/process.cc
@@ -305,7 +305,7 @@ bool JsHttpRequestProcessor::Process(HttpRequest* request) {


 JsHttpRequestProcessor::~JsHttpRequestProcessor() {
-// Dispose the persistent handles. When noone else has any
+// Dispose the persistent handles. When no one else has any
 // references to the objects stored in the handles they will be
 // automatically reclaimed.
 context_.Reset();
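
A minimal sketch of the persistent-handle pattern the destructor comment describes; ContextHolder is a stand-in class, not the sample's JsHttpRequestProcessor.

```cpp
// Sketch only: Reset() drops this holder's reference; the context becomes
// collectable once no one else holds a handle to it.
#include "include/v8.h"

class ContextHolder {
 public:
  ContextHolder(v8::Isolate* isolate, v8::Local<v8::Context> context)
      : context_(isolate, context) {}

  ~ContextHolder() {
    // Dispose the persistent handle, mirroring the destructor above.
    context_.Reset();
  }

 private:
  v8::Persistent<v8::Context> context_;
};
```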
2 changes: 1 addition & 1 deletion src/accessors.cc
@@ -946,7 +946,7 @@ class FrameFunctionIterator {
 }
 }

-// Iterate through functions until the first occurence of 'function'.
+// Iterate through functions until the first occurrence of 'function'.
 // Returns true if 'function' is found, and false if the iterator ends
 // without finding it.
 bool Find(JSFunction* function) {
4 changes: 2 additions & 2 deletions src/arm/assembler-arm.cc
@@ -1053,7 +1053,7 @@ void Assembler::next(Label* L) {
 DCHECK(L->is_linked());
 int link = target_at(L->pos());
 if (link == L->pos()) {
-// Branch target points to the same instuction. This is the end of the link
+// Branch target points to the same instruction. This is the end of the link
 // chain.
 L->Unuse();
 } else {
@@ -1361,7 +1361,7 @@ void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
 DCHECK(offset_8 >= 0); // no masking needed
 instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
 } else if (x.shift_imm_ != 0) {
-// Scaled register offsets are not supported, compute the offset seperately
+// Scaled register offsets are not supported, compute the offset separately
 // to a scratch register.
 UseScratchRegisterScope temps(this);
 // Allow re-using rd for load instructions if possible.
2 changes: 1 addition & 1 deletion src/arm/assembler-arm.h
@@ -866,7 +866,7 @@ class Assembler : public AssemblerBase {
 // Distance between start of patched debug break slot and the emitted address
 // to jump to.
 // Patched debug break slot code is:
-// ldr ip, [pc, #0] @ emited address and start
+// ldr ip, [pc, #0] @ emitted address and start
 // blx ip
 static constexpr int kPatchDebugBreakSlotAddressOffset = 2 * kInstrSize;

4 changes: 2 additions & 2 deletions src/arm/disasm-arm.cc
@@ -726,7 +726,7 @@ void Decoder::Format(Instruction* instr, const char* format) {


 // The disassembler may end up decoding data inlined in the code. We do not want
-// it to crash if the data does not ressemble any known instruction.
+// it to crash if the data does not resemble any known instruction.
 #define VERIFY(condition) \
 if(!(condition)) { \
 Unknown(instr); \
@@ -2602,7 +2602,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
 DecodeConstantPoolLength(instruction_bits));
 return Instruction::kInstrSize;
 } else if (instruction_bits == kCodeAgeJumpInstruction) {
-// The code age prologue has a constant immediatly following the jump
+// The code age prologue has a constant immediately following the jump
 // instruction.
 Instruction* target = Instruction::At(instr_ptr + Instruction::kInstrSize);
 DecodeType2(instr);
2 changes: 1 addition & 1 deletion src/arm/simulator-arm.cc
@@ -5833,7 +5833,7 @@ void Simulator::Execute() {
 }
 } else {
 // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
-// we reach the particular instuction count.
+// we reach the particular instruction count.
 while (program_counter != end_sim_pc) {
 Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
 icount_++;
2 changes: 1 addition & 1 deletion src/arm/simulator-arm.h
@@ -444,7 +444,7 @@ class Simulator {
 };
 StopCountAndDesc watched_stops_[kNumOfWatchedStops];

-// Syncronization primitives. See ARM DDI 0406C.b, A2.9.
+// Synchronization primitives. See ARM DDI 0406C.b, A2.9.
 enum class MonitorAccess {
 Open,
 Exclusive,
2 changes: 1 addition & 1 deletion src/arm64/constants-arm64.h
@@ -171,7 +171,7 @@ typedef uint16_t float16;
 V_(ImmAddSub, 21, 10, Bits) \
 V_(ShiftAddSub, 23, 22, Bits) \
 \
-/* Add/substract extend */ \
+/* Add/subtract extend */ \
 V_(ImmExtendShift, 12, 10, Bits) \
 V_(ExtendMode, 15, 13, Bits) \
 \
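
The V_(Name, HighBit, LowBit, Function) rows above describe instruction bit fields; a small illustrative extractor (not V8's actual macro expansion) shows how such a field table is read.

```cpp
// Sketch only: extract the inclusive bit range [lowbit, highbit] of an
// instruction word, which is how the field table above is meant to be read.
#include <cstdint>

constexpr uint32_t Bits(uint32_t instr, int highbit, int lowbit) {
  return (instr >> lowbit) & ((1u << (highbit - lowbit + 1)) - 1u);
}

// Example: ImmAddSub occupies bits 21..10, the 12-bit add/subtract immediate.
constexpr uint32_t ImmAddSub(uint32_t instr) { return Bits(instr, 21, 10); }
```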
2 changes: 1 addition & 1 deletion src/arm64/macro-assembler-arm64.cc
@@ -2455,7 +2455,7 @@ void TurboAssembler::TryConvertDoubleToInt64(Register result,

 // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
 // representable using a double, so if the result is one of those then we know
-// that saturation occured, and we need to manually handle the conversion.
+// that saturation occurred, and we need to manually handle the conversion.
 //
 // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
 // 1 will cause signed overflow.
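
A plain-C++ illustration of the saturation check described in the hunk above; the generated arm64 code performs the equivalent test on registers.

```cpp
// Sketch only: a saturated conversion result is exactly INT64_MIN or
// INT64_MAX, the only int64_t values for which adding or subtracting 1
// overflows, which is the property the comment above relies on.
#include <cstdint>
#include <limits>

bool ConversionMayHaveSaturated(int64_t result) {
  return result == std::numeric_limits<int64_t>::min() ||
         result == std::numeric_limits<int64_t>::max();
}
```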
2 changes: 1 addition & 1 deletion src/arm64/macro-assembler-arm64.h
@@ -2390,7 +2390,7 @@ class MacroAssembler : public TurboAssembler {
 };


-// Use this scope when you need a one-to-one mapping bewteen methods and
+// Use this scope when you need a one-to-one mapping between methods and
 // instructions. This scope prevents the MacroAssembler from being called and
 // literal pools from being emitted. It also asserts the number of instructions
 // emitted is what you specified when creating the scope.
2 changes: 1 addition & 1 deletion src/assembler.h
@@ -373,7 +373,7 @@ class RelocInfo {
 NUMBER_OF_MODES,
 NONE32, // never recorded 32-bit value
 NONE64, // never recorded 64-bit value
-CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explictly by
+CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explicitly by
 // code aging.

 FIRST_REAL_RELOC_MODE = CODE_TARGET,
2 changes: 1 addition & 1 deletion src/base/bits.h
@@ -333,7 +333,7 @@ FromCheckedNumeric(const internal::CheckedNumeric<int64_t> value);
 // checks and returns the result.
 V8_BASE_EXPORT int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs);

-// SignedSaturatedSub64(lhs, rhs) substracts |lhs| by |rhs|,
+// SignedSaturatedSub64(lhs, rhs) subtracts |lhs| by |rhs|,
 // checks and returns the result.
 V8_BASE_EXPORT int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs);

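
A hedged sketch of the clamping contract stated for SignedSaturatedSub64; this is an illustration, not the v8::base implementation.

```cpp
// Sketch only: clamp to the int64_t range instead of overflowing; this
// illustrates the contract in the comment above, not the real implementation.
#include <cstdint>
#include <limits>

int64_t SaturatedSub64(int64_t lhs, int64_t rhs) {
  if (rhs > 0 && lhs < std::numeric_limits<int64_t>::min() + rhs) {
    return std::numeric_limits<int64_t>::min();  // would underflow
  }
  if (rhs < 0 && lhs > std::numeric_limits<int64_t>::max() + rhs) {
    return std::numeric_limits<int64_t>::max();  // would overflow
  }
  return lhs - rhs;
}
```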
4 changes: 2 additions & 2 deletions src/base/cpu.cc
@@ -221,14 +221,14 @@ class CPUInfo final {
 delete[] data_;
 }

-// Extract the content of a the first occurence of a given field in
+// Extract the content of a the first occurrence of a given field in
 // the content of the cpuinfo file and return it as a heap-allocated
 // string that must be freed by the caller using delete[].
 // Return NULL if not found.
 char* ExtractField(const char* field) const {
 DCHECK(field != NULL);

-// Look for first field occurence, and ensure it starts the line.
+// Look for first field occurrence, and ensure it starts the line.
 size_t fieldlen = strlen(field);
 char* p = data_;
 for (;;) {
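
An illustrative, std::string-based version of the parsing described by ExtractField's comment; the real method returns a heap-allocated char* that must be freed with delete[].

```cpp
// Sketch only: find the first occurrence of |field| at the start of a line in
// /proc/cpuinfo-style text and return the value after the ':'.
#include <cstddef>
#include <string>

std::string ExtractCpuinfoField(const std::string& cpuinfo,
                                const std::string& field) {
  std::size_t pos = 0;
  while ((pos = cpuinfo.find(field, pos)) != std::string::npos) {
    // Only accept matches that start a line, as the comment above requires.
    if (pos == 0 || cpuinfo[pos - 1] == '\n') {
      std::size_t colon = cpuinfo.find(':', pos);
      if (colon == std::string::npos) break;
      std::size_t start = cpuinfo.find_first_not_of(" \t", colon + 1);
      std::size_t end = cpuinfo.find('\n', colon + 1);
      if (start == std::string::npos || start == end) break;
      return cpuinfo.substr(start, end - start);
    }
    pos += field.size();
  }
  return std::string();  // not found
}
```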
2 changes: 1 addition & 1 deletion src/base/functional.h
@@ -25,7 +25,7 @@ namespace base {
 // the Draft Technical Report on C++ Library Extensions (TR1)).
 //
 // base::hash is implemented by calling the hash_value function. The namespace
-// isn't specified so that it can detect overloads via argument dependant
+// isn't specified so that it can detect overloads via argument dependent
 // lookup. So if there is a free function hash_value in the same namespace as a
 // custom type, it will get called.
 //
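
A short sketch of the argument-dependent lookup mechanism the comment describes; the geometry::Point type and its hash_value overload are invented for illustration.

```cpp
// Sketch only: because base::hash calls hash_value unqualified, the free
// function next to Point is found via argument-dependent lookup.
#include <cstddef>
#include <functional>

namespace geometry {

struct Point {
  int x;
  int y;
};

inline std::size_t hash_value(const Point& p) {
  // Any reasonable mixing of the members works for the illustration.
  return std::hash<int>()(p.x) * 31u + std::hash<int>()(p.y);
}

}  // namespace geometry

template <typename T>
std::size_t HashViaAdl(const T& value) {
  return hash_value(value);  // unqualified call: ADL finds geometry::hash_value
}

// Usage: std::size_t h = HashViaAdl(geometry::Point{1, 2});
```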
2 changes: 1 addition & 1 deletion src/base/ieee754.cc
@@ -2401,7 +2401,7 @@ double cbrt(double x) {
 * error of about 1 in 16. Adding a bias of -0.03306235651 to the
 * (e%3+m)/3 term reduces the error to about 1 in 32. With the IEEE
 * floating point representation, for finite positive normal values,
-* ordinary integer divison of the value in bits magically gives
+* ordinary integer division of the value in bits magically gives
 * almost exactly the RHS of the above provided we first subtract the
 * exponent bias (1023 for doubles) and later add it back. We do the
 * subtraction virtually to keep e >= 0 so that ordinary integer
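
A worked sketch of the bit trick the cbrt comment describes, limited to positive normal doubles; the constant is assumed to be the usual fdlibm bias term, and the real function refines the guess with Newton steps.

```cpp
// Sketch only, for positive finite normal x: integer division of the high
// word by 3 plus a bias-correcting constant gives a first approximation of
// cbrt(x) good to a few bits; the real code then refines it.
#include <cstdint>
#include <cstring>

double CbrtInitialGuess(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  // Dividing the biased exponent (plus mantissa) by 3 roughly divides the
  // exponent by 3; the constant restores the exponent bias (1023) and folds
  // in the -0.03306235651 correction mentioned above (fdlibm's B1 constant,
  // assumed here).
  hi = hi / 3 + 715094163u;
  bits = static_cast<uint64_t>(hi) << 32;  // low mantissa bits dropped
  double t;
  std::memcpy(&t, &bits, sizeof(t));
  return t;
}
```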
2 changes: 1 addition & 1 deletion src/base/platform/platform.h
@@ -237,7 +237,7 @@ class V8_BASE_EXPORT OS {
 static void StrNCpy(char* dest, int length, const char* src, size_t n);

 // Support for the profiler. Can do nothing, in which case ticks
-// occuring in shared libraries will not be properly accounted for.
+// occurring in shared libraries will not be properly accounted for.
 struct SharedLibraryAddress {
 SharedLibraryAddress(const std::string& library_path, uintptr_t start,
 uintptr_t end)
2 changes: 1 addition & 1 deletion src/builtins/builtins-array-gen.cc
@@ -764,7 +764,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
 &runtime);

 // We need to be conservative and start with holey because the builtins
-// that create output arrays aren't gauranteed to be called for every
+// that create output arrays aren't guaranteed to be called for every
 // element in the input array (maybe the callback deletes an element).
 const ElementsKind elements_kind =
 GetHoleyElementsKind(GetInitialFastElementsKind());
2 changes: 1 addition & 1 deletion src/builtins/ia32/builtins-ia32.cc
@@ -619,7 +619,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
 Register args_count = scratch1;
 Register return_pc = scratch2;

-// Get the arguments + reciever count.
+// Get the arguments + receiver count.
 __ mov(args_count,
 Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
 __ mov(args_count,
2 changes: 1 addition & 1 deletion src/codegen.cc
@@ -195,7 +195,7 @@ static int PrintFunctionSource(CompilationInfo* info,
 }

 // Print information for the given inlining: which function was inlined and
-// where the inlining occured.
+// where the inlining occurred.
 static void PrintInlinedFunctionInfo(
 CompilationInfo* info, int source_id, int inlining_id,
 const CompilationInfo::InlinedFunctionHolder& h) {
2 changes: 1 addition & 1 deletion src/compiler/bytecode-analysis.cc
@@ -424,7 +424,7 @@ int BytecodeAnalysis::GetLoopOffsetFor(int offset) const {
 if (loop_end_to_header == end_to_header_.end()) {
 return -1;
 }
-// If the header preceeds the offset, this is the loop
+// If the header precedes the offset, this is the loop
 //
 // .> header <--loop_end_to_header
 // |
2 changes: 1 addition & 1 deletion src/compiler/instruction-selector.cc
@@ -467,7 +467,7 @@ InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
 return g->UseUniqueSlot(input);
 case FrameStateInputKind::kAny:
 // Currently deopts "wrap" other operations, so the deopt's inputs
-// are potentially needed untill the end of the deoptimising code.
+// are potentially needed until the end of the deoptimising code.
 return g->UseAnyAtEnd(input);
 }
 }
6 changes: 3 additions & 3 deletions src/compiler/js-native-context-specialization.cc
@@ -573,7 +573,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
 value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
 value, effect, control);

-// Check {value} map agains the {property_cell} map.
+// Check {value} map against the {property_cell} map.
 effect =
 graph()->NewNode(simplified()->CheckMaps(
 CheckMapsFlag::kNone,
@@ -2289,12 +2289,12 @@ bool JSNativeContextSpecialization::ExtractReceiverMaps(
 DCHECK_EQ(0, receiver_maps->size());
 // See if we can infer a concrete type for the {receiver}.
 if (InferReceiverMaps(receiver, effect, receiver_maps)) {
-// We can assume that the {receiver} still has the infered {receiver_maps}.
+// We can assume that the {receiver} still has the inferred {receiver_maps}.
 return true;
 }
 // Try to extract some maps from the {nexus}.
 if (nexus.ExtractMaps(receiver_maps) != 0) {
-// Try to filter impossible candidates based on infered root map.
+// Try to filter impossible candidates based on inferred root map.
 Handle<Map> receiver_map;
 if (InferReceiverRootMap(receiver).ToHandle(&receiver_map)) {
 receiver_maps->erase(
2 changes: 1 addition & 1 deletion src/compiler/load-elimination.h
@@ -19,7 +19,7 @@ class Factory;

 namespace compiler {

-// Foward declarations.
+// Forward declarations.
 class CommonOperatorBuilder;
 struct FieldAccess;
 class Graph;
6 changes: 3 additions & 3 deletions src/compiler/mips/code-generator-mips.cc
@@ -1081,7 +1081,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 i.InputInt8(2));
 break;
 case kMipsCmpS:
-// Psuedo-instruction used for FP cmp/branch. No opcode emitted here.
+// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
 break;
 case kMipsAddS:
 // TODO(plind): add special case: combine mult & add.
@@ -1131,7 +1131,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 i.InputDoubleRegister(1));
 break;
 case kMipsCmpD:
-// Psuedo-instruction used for FP cmp/branch. No opcode emitted here.
+// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
 break;
 case kMipsAddPair:
 __ AddPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
@@ -2915,7 +2915,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
 Condition cc = kNoCondition;
 // MIPS does not have condition code flags, so compare and branch are
 // implemented differently than on the other arch's. The compare operations
-// emit mips psuedo-instructions, which are checked and handled here.
+// emit mips pseudo-instructions, which are checked and handled here.

 if (instr->arch_opcode() == kMipsTst) {
 cc = FlagsConditionToConditionTst(condition);
6 changes: 3 additions & 3 deletions src/compiler/mips64/code-generator-mips64.cc
@@ -1283,7 +1283,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 break;

 case kMips64CmpS:
-// Psuedo-instruction used for FP cmp/branch. No opcode emitted here.
+// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
 break;
 case kMips64AddS:
 // TODO(plind): add special case: combine mult & add.
@@ -1337,7 +1337,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 i.InputDoubleRegister(1));
 break;
 case kMips64CmpD:
-// Psuedo-instruction used for FP cmp/branch. No opcode emitted here.
+// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
 break;
 case kMips64AddD:
 // TODO(plind): add special case: combine mult & add.
@@ -3022,7 +3022,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
 Condition cc = kNoCondition;
 // MIPS does not have condition code flags, so compare and branch are
 // implemented differently than on the other arch's. The compare operations
-// emit mips psuedo-instructions, which are handled here by branch
+// emit mips pseudo-instructions, which are handled here by branch
 // instructions that do the actual comparison. Essential that the input
 // registers to compare pseudo-op are not modified before this branch op, as
 // they are tested here.
2 changes: 1 addition & 1 deletion src/compiler/move-optimizer.h
@@ -45,7 +45,7 @@ class V8_EXPORT_PRIVATE MoveOptimizer final {

 const Instruction* LastInstruction(const InstructionBlock* block) const;

-// Consolidate common moves appearing accross all predecessors of a block.
+// Consolidate common moves appearing across all predecessors of a block.
 void OptimizeMerge(InstructionBlock* block);
 void FinalizeMoves(Instruction* instr);

2 changes: 1 addition & 1 deletion src/compiler/register-allocator.cc
@@ -2234,7 +2234,7 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
 // block.
 int phi_vreg = phi->virtual_register();
 live->Remove(phi_vreg);
-// Select a hint from a predecessor block that preceeds this block in the
+// Select a hint from a predecessor block that precedes this block in the
 // rpo order. In order of priority:
 // - Avoid hints from deferred blocks.
 // - Prefer hints from allocated (or explicit) operands.
2 changes: 1 addition & 1 deletion src/compiler/scheduler.cc
@@ -1396,7 +1396,7 @@ class ScheduleLateNodeVisitor {
 block->loop_depth(), min_block->id().ToInt());

 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
-// into enclosing loop pre-headers until they would preceed their schedule
+// into enclosing loop pre-headers until they would precede their schedule
 // early position.
 BasicBlock* hoist_block = GetHoistBlock(block);
 if (hoist_block &&
2 changes: 1 addition & 1 deletion src/conversions-inl.h
@@ -489,7 +489,7 @@ double InternalStringToInt(UnicodeCache* unicode_cache,

 // NOTE: The code for computing the value may seem a bit complex at
 // first glance. It is structured to use 32-bit multiply-and-add
-// loops as long as possible to avoid loosing precision.
+// loops as long as possible to avoid losing precision.

 double v = 0.0;
 bool done = false;
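
An illustrative sketch of the 32-bit multiply-and-add structure the NOTE refers to; digit validation and radix handling are simplified, and this is not V8's exact loop.

```cpp
// Sketch only: accumulate decimal digits in a 32-bit "part" until the part
// is about to overflow, then fold it into the double. Each double
// multiply-add therefore covers many digits, which is the precision trick
// the NOTE above describes.
#include <cstdint>

double AccumulateDecimalDigits(const char* begin, const char* end) {
  double v = 0.0;
  uint32_t part = 0;        // digits collected so far, base 10
  uint32_t multiplier = 1;  // 10^(number of digits in |part|)
  for (const char* p = begin; p != end; ++p) {
    uint32_t digit = static_cast<uint32_t>(*p - '0');  // assumes '0'..'9'
    if (multiplier > 0xFFFFFFFFu / 10u) {
      // |part| and |multiplier| would overflow 32 bits; fold them in now.
      v = v * multiplier + part;
      part = 0;
      multiplier = 1;
    }
    part = part * 10u + digit;
    multiplier *= 10u;
  }
  return v * multiplier + part;
}
```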
