Fix some style issues in the ARM code.

Also move a function into the macro assembler.
Fix some *& placement errors that had accumulated.
Review URL: http://codereview.chromium.org/385069

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@3293 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Commit 6c65b6e4868a4998eeda4f1f5fd70bdc239d5e05 (1 parent: c9b58da), committed by erik.corry@gmail.com on Nov 12, 2009
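
The style rule behind most of the hunks below, for reference: V8 attaches * and & to the type rather than to the variable name. Drawn from the diffs themselves:

    // Accumulated style error being fixed:
    void WriteInt32ToHeapNumberStub::Generate(MacroAssembler *masm);
    // V8 style -- the * binds to the type:
    void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm);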
@@ -77,9 +77,9 @@ class Arguments BASE_EMBEDDED {
// can.
class CustomArguments : public Relocatable {
public:
- inline CustomArguments(Object *data,
- JSObject *self,
- JSObject *holder) {
+ inline CustomArguments(Object* data,
+ JSObject* self,
+ JSObject* holder) {
values_[3] = self;
values_[2] = holder;
values_[1] = Smi::FromInt(0);
@@ -85,7 +85,7 @@ Object* RelocInfo::target_object() {
}
-Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
}
@@ -43,31 +43,30 @@ namespace v8 {
namespace internal {
// Safe default is no features.
-uint64_t CpuFeatures::supported_ = 0;
-uint64_t CpuFeatures::enabled_ = 0;
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::enabled_ = 0;
void CpuFeatures::Probe() {
- // Perform runtime detection of VFP.
- static const char* descriptive_file_linux = "/proc/cpuinfo";
-
- #if !defined(__arm__) || (defined(__VFP_FP__) && !defined(__SOFTFP__))
- // The supported & enabled flags for VFP are set to true for the following
- // conditions, even without runtime detection of VFP:
- // (1) For the simulator=arm build, always use VFP since
- // the arm simulator has VFP support.
- // (2) If V8 is being compiled with GCC with the vfp option turned on,
- // always use VFP since the build system assumes that V8 will run on
- // a platform that has VFP hardware.
- supported_ |= static_cast<uint64_t>(1) << VFP3;
- enabled_ |= static_cast<uint64_t>(1) << VFP3;
- #endif
-
- if (OS::fgrep_vfp(descriptive_file_linux, "vfp")) {
+ // If the compiler is allowed to use vfp then we can use vfp too in our
+ // code generation.
+#if !defined(__arm__) || (defined(__VFP_FP__) && !defined(__SOFTFP__))
+ // The supported flags for VFP are set to true for the following
+ // conditions, even without runtime detection of VFP:
+ // (1) For the simulator=arm build, always use VFP since
+ // the arm simulator has VFP support.
+ // (2) If V8 is being compiled with GCC with the vfp option turned on,
+ // always use VFP since the build system assumes that V8 will run on
+ // a platform that has VFP hardware.
+ supported_ |= 1u << VFP3;
+#else
+ if (Serializer::enabled()) return; // No features if we might serialize.
+
+ if (OS::ArmCpuHasFeature(OS::VFP)) {
// This implementation also sets the VFP flags if
// runtime detection of VFP returns true.
- supported_ |= static_cast<uint64_t>(1) << VFP3;
- enabled_ |= static_cast<uint64_t>(1) << VFP3;
+ supported_ |= 1u << VFP3;
}
+#endif
}
// -----------------------------------------------------------------------------
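
The old code grepped /proc/cpuinfo directly via OS::fgrep_vfp; the new code goes through a named query, OS::ArmCpuHasFeature(OS::VFP), whose implementation is not part of this diff. A minimal sketch of what such a Linux runtime probe could look like, assuming it still works by scanning /proc/cpuinfo for the feature name (the helper below is hypothetical, not the actual platform code):

    #include <stdio.h>
    #include <string.h>

    // Hypothetical sketch of a /proc/cpuinfo feature probe; the real
    // OS::ArmCpuHasFeature lives in the platform layer and may differ.
    static bool CpuInfoContainsFeature(const char* feature) {
      FILE* f = fopen("/proc/cpuinfo", "r");
      if (f == NULL) return false;
      char line[512];
      bool found = false;
      while (!found && fgets(line, sizeof(line), f) != NULL) {
        // On ARM Linux the "Features" line lists e.g. "... vfp edsp ...".
        found = (strstr(line, feature) != NULL);
      }
      fclose(f);
      return found;
    }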
@@ -435,16 +435,33 @@ class CpuFeatures : public AllStatic {
static bool IsSupported(Feature f) {
if (f == VFP3 && !FLAG_enable_vfp3) return false;
- return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
+ return (supported_ & (1u << f)) != 0;
}
// Check whether a feature is currently enabled.
static bool IsEnabled(Feature f) {
- return (enabled_ & (static_cast<uint64_t>(1) << f)) != 0;
+ return (enabled_ & (1u << f)) != 0;
}
+ // Enable a specified feature within a scope.
+ class Scope BASE_EMBEDDED {
+#ifdef DEBUG
+ public:
+ explicit Scope(Feature f) {
+ ASSERT(CpuFeatures::IsSupported(f));
+ old_enabled_ = CpuFeatures::enabled_;
+ CpuFeatures::enabled_ |= 1u << f;
+ }
+ ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
+ private:
+ unsigned old_enabled_;
+#else
+ public:
+ explicit Scope(Feature f) {}
+#endif
+ };
private:
- static uint64_t supported_;
- static uint64_t enabled_;
+ static unsigned supported_;
+ static unsigned enabled_;
};
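
The new Scope class is used as shown in the codegen hunks below: code first checks IsSupported, then opens a Scope so that, in debug builds, the assembler can verify VFP3 instructions are only emitted while the feature is actually enabled. Taken from CompareStub::Generate further down:

    if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) {
      CpuFeatures::Scope scope(CpuFeatures::VFP3);  // Sets the VFP3 bit in enabled_.
      __ fmdrr(d6, r0, r1);  // VFP3 instructions are legal inside the scope.
      __ fmdrr(d7, r2, r3);
    }  // ~Scope() restores the previous enabled_ mask.

In release builds the Scope constructor is empty, so the pattern costs nothing outside DEBUG.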
@@ -284,7 +284,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// Both registers are preserved by this code so no need to differentiate between
// construct call and normal call.
static void ArrayNativeCode(MacroAssembler* masm,
- Label *call_generic_code) {
+ Label* call_generic_code) {
Label argc_one_or_more, argc_two_or_more;
// Check for array construction with zero arguments or one.
@@ -4472,7 +4472,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
// See comment for class.
-void WriteInt32ToHeapNumberStub::Generate(MacroAssembler *masm) {
+void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
// the_int_ has the answer which is a signed int32 but not a Smi.
// We test for the special value that has a different exponent. This test
@@ -4599,21 +4599,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
}
-static void IntegerToDoubleConversionWithVFP3(MacroAssembler* masm,
- Register inReg,
- Register outHighReg,
- Register outLowReg) {
- // ARMv7 VFP3 instructions to implement integer to double conversion.
- // This VFP3 implementation is known to work
- // on ARMv7-VFP3 Snapdragon processor.
-
- __ mov(r7, Operand(inReg, ASR, kSmiTagSize));
- __ fmsr(s15, r7);
- __ fsitod(d7, s15);
- __ fmrrd(outLowReg, outHighReg, d7);
-}
-
-
// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Label* rhs_not_nan,
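
The deleted IntegerToDoubleConversionWithVFP3 above is the function the commit message says moved into the macro assembler. Its new home (presumably MacroAssembler::IntegerToDoubleConversionWithVFP3 in the ARM macro-assembler files) is not shown in this excerpt; reconstructed from the deleted static helper, the member version would look roughly like:

    // Sketch only: body reconstructed from the deleted static function above;
    // the actual macro-assembler code may differ in details.
    void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
                                                           Register outHighReg,
                                                           Register outLowReg) {
      // ARMv7 VFP3 instructions to implement integer to double conversion.
      mov(r7, Operand(inReg, ASR, kSmiTagSize));  // Untag the Smi argument.
      fmsr(s15, r7);                              // Core register -> VFP single.
      fsitod(d7, s15);                            // Signed int -> double.
      fmrrd(outLowReg, outHighReg, d7);           // Double -> two core registers.
    }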
@@ -4639,7 +4624,8 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ push(lr);
if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) {
- IntegerToDoubleConversionWithVFP3(masm, r1, r3, r2);
+ CpuFeatures::Scope scope(CpuFeatures::VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r1, r3, r2);
} else {
__ mov(r7, Operand(r1));
ConvertToDoubleStub stub1(r3, r2, r7, r6);
@@ -4676,7 +4662,8 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) {
- IntegerToDoubleConversionWithVFP3(masm, r0, r1, r0);
+ CpuFeatures::Scope scope(CpuFeatures::VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r0, r1, r0);
} else {
__ mov(r7, Operand(r0));
ConvertToDoubleStub stub2(r1, r0, r7, r6);
@@ -4886,10 +4873,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
EmitNanCheck(masm, &rhs_not_nan, cc_);
if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) {
+ CpuFeatures::Scope scope(CpuFeatures::VFP3);
// ARMv7 VFP3 instructions to implement double precision comparison.
- // This VFP3 implementation is known to work on
- // ARMv7-VFP3 Snapdragon processor.
-
__ fmdrr(d6, r0, r1);
__ fmdrr(d7, r2, r3);
@@ -5005,8 +4990,9 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
AllocateHeapNumber(masm, &slow, r5, r6, r7);
if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) {
- IntegerToDoubleConversionWithVFP3(masm, r0, r3, r2);
- IntegerToDoubleConversionWithVFP3(masm, r1, r1, r0);
+ CpuFeatures::Scope scope(CpuFeatures::VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
} else {
// Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
__ mov(r7, Operand(r0));
@@ -5058,7 +5044,8 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) {
- IntegerToDoubleConversionWithVFP3(masm, r0, r3, r2);
+ CpuFeatures::Scope scope(CpuFeatures::VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
} else {
// Write Smi from r0 to r3 and r2 in double format.
__ mov(r7, Operand(r0));
@@ -5089,7 +5076,8 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
}
if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) {
- IntegerToDoubleConversionWithVFP3(masm, r1, r1, r0);
+ CpuFeatures::Scope scope(CpuFeatures::VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
} else {
// Write Smi from r1 to r1 and r0 in double format.
__ mov(r7, Operand(r1));
@@ -5113,26 +5101,31 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
(Token::DIV == operation) ||
(Token::ADD == operation) ||
(Token::SUB == operation))) {
- // ARMv7 VFP3 instructions to implement
- // double precision, add, subtract, multiply, divide.
- // This VFP3 implementation is known to work on
- // ARMv7-VFP3 Snapdragon processor
-
- __ fmdrr(d6, r0, r1);
- __ fmdrr(d7, r2, r3);
+ CpuFeatures::Scope scope(CpuFeatures::VFP3);
+ // ARMv7 VFP3 instructions to implement
+ // double precision, add, subtract, multiply, divide.
+ __ fmdrr(d6, r0, r1);
+ __ fmdrr(d7, r2, r3);
- if (Token::MUL == operation) __ fmuld(d5, d6, d7);
- else if (Token::DIV == operation) __ fdivd(d5, d6, d7);
- else if (Token::ADD == operation) __ faddd(d5, d6, d7);
- else if (Token::SUB == operation) __ fsubd(d5, d6, d7);
+ if (Token::MUL == operation) {
+ __ fmuld(d5, d6, d7);
+ } else if (Token::DIV == operation) {
+ __ fdivd(d5, d6, d7);
+ } else if (Token::ADD == operation) {
+ __ faddd(d5, d6, d7);
+ } else if (Token::SUB == operation) {
+ __ fsubd(d5, d6, d7);
+ } else {
+ UNREACHABLE();
+ }
- __ fmrrd(r0, r1, d5);
+ __ fmrrd(r0, r1, d5);
- __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
- __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
- __ mov(r0, Operand(r5));
- __ mov(pc, lr);
- return;
+ __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
+ __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
+ __ mov(r0, Operand(r5));
+ __ mov(pc, lr);
+ return;
}
__ push(lr); // For later.
__ push(r5); // Address of heap number that is answer.
@@ -5211,10 +5204,9 @@ static void GetInt32(MacroAssembler* masm,
}
__ bind(&right_exponent);
if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) {
+ CpuFeatures::Scope scope(CpuFeatures::VFP3);
// ARMv7 VFP3 instructions implementing double precision to integer
// conversion using round to zero.
- // This VFP3 implementation is known to work on
- // ARMv7-VFP3 Snapdragon processor.
__ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
__ fmdrr(d7, scratch2, scratch);
__ ftosid(s15, d7);
@@ -5227,7 +5219,7 @@ static void GetInt32(MacroAssembler* masm,
// Shift up the mantissa bits to take up the space the exponent used to
// take. We just orred in the implicit bit so that took care of one and
// we want to leave the sign bit 0 so we subtract 2 bits from the shift
- // distance.
+ // distance.
const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
__ mov(scratch2, Operand(scratch2, LSL, shift_distance));
// Put sign in zero flag.
@@ -66,6 +66,7 @@ const char* Registers::Name(int reg) {
return result;
}
+
// Support for VFP registers s0 to s31 (d0 to d15).
// Note that "sN:sM" is the same as "dN/2"
// These register names are defined in a way to match the native disassembler
// formatting.
@@ -76,19 +77,16 @@ const char* VFPRegisters::names_[kNumVFPRegisters] = {
"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
- "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+ "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15"
};
+
const char* VFPRegisters::Name(int reg) {
- const char* result;
- if ((0 <= reg) && (reg < kNumVFPRegisters)) {
- result = names_[reg];
- } else {
- result = "no_vfp_reg";
- }
- return result;
+ ASSERT((0 <= reg) && (reg < kNumVFPRegisters));
+ return names_[reg];
}
+
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumRegisters; i++) {
@@ -320,7 +320,7 @@ class Registers {
struct RegisterAlias {
int reg;
- const char *name;
+ const char* name;
};
private: