Diffstat (limited to 'deps/v8/src/arm/code-stubs-arm.cc')
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc  1013
1 file changed, 502 insertions(+), 511 deletions(-)
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 5bb2116263..9484f85f97 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -41,8 +41,7 @@ namespace internal {
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cond,
- bool never_nan_nan);
+ Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
@@ -627,24 +626,6 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
}
-void FloatingPointHelper::LoadOperands(
- MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* slow) {
-
- // Load right operand (r0) to d6 or r2/r3.
- LoadNumber(masm, destination,
- r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
-
- // Load left operand (r1) to d7 or r0/r1.
- LoadNumber(masm, destination,
- r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
-}
-
-
void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
Destination destination,
Register object,
@@ -655,11 +636,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Label* not_number) {
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
Label is_smi, done;
@@ -716,11 +695,9 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
Register scratch3,
DwVfpRegister double_scratch,
Label* not_number) {
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
Label done;
Label not_in_int32_range;
@@ -752,13 +729,13 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
Register int_scratch,
Destination destination,
DwVfpRegister double_dst,
- Register dst1,
- Register dst2,
+ Register dst_mantissa,
+ Register dst_exponent,
Register scratch2,
SwVfpRegister single_scratch) {
ASSERT(!int_scratch.is(scratch2));
- ASSERT(!int_scratch.is(dst1));
- ASSERT(!int_scratch.is(dst2));
+ ASSERT(!int_scratch.is(dst_mantissa));
+ ASSERT(!int_scratch.is(dst_exponent));
Label done;
@@ -767,56 +744,57 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
__ vmov(single_scratch, int_scratch);
__ vcvt_f64_s32(double_dst, single_scratch);
if (destination == kCoreRegisters) {
- __ vmov(dst1, dst2, double_dst);
+ __ vmov(dst_mantissa, dst_exponent, double_dst);
}
} else {
Label fewer_than_20_useful_bits;
// Expected output:
- // | dst2 | dst1 |
+ // | dst_exponent | dst_mantissa |
// | s | exp | mantissa |
// Check for zero.
__ cmp(int_scratch, Operand::Zero());
- __ mov(dst2, int_scratch);
- __ mov(dst1, int_scratch);
+ __ mov(dst_exponent, int_scratch);
+ __ mov(dst_mantissa, int_scratch);
__ b(eq, &done);
// Preload the sign of the value.
- __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
+ __ and_(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
// Get the absolute value of the object (as an unsigned integer).
__ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);
// Get mantissa[51:20].
// Get the position of the first set bit.
- __ CountLeadingZeros(dst1, int_scratch, scratch2);
- __ rsb(dst1, dst1, Operand(31));
+ __ CountLeadingZeros(dst_mantissa, int_scratch, scratch2);
+ __ rsb(dst_mantissa, dst_mantissa, Operand(31));
// Set the exponent.
- __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias));
- __ Bfi(dst2, scratch2, scratch2,
+ __ add(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias));
+ __ Bfi(dst_exponent, scratch2, scratch2,
HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// Clear the first non null bit.
__ mov(scratch2, Operand(1));
- __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1));
+ __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst_mantissa));
- __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ cmp(dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord));
// Get the number of bits to set in the lower part of the mantissa.
- __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+ __ sub(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord),
+ SetCC);
__ b(mi, &fewer_than_20_useful_bits);
// Set the higher 20 bits of the mantissa.
- __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2));
+ __ orr(dst_exponent, dst_exponent, Operand(int_scratch, LSR, scratch2));
__ rsb(scratch2, scratch2, Operand(32));
- __ mov(dst1, Operand(int_scratch, LSL, scratch2));
+ __ mov(dst_mantissa, Operand(int_scratch, LSL, scratch2));
__ b(&done);
__ bind(&fewer_than_20_useful_bits);
- __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ rsb(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord));
__ mov(scratch2, Operand(int_scratch, LSL, scratch2));
- __ orr(dst2, dst2, scratch2);
+ __ orr(dst_exponent, dst_exponent, scratch2);
// Set dst1 to 0.
- __ mov(dst1, Operand::Zero());
+ __ mov(dst_mantissa, Operand::Zero());
}
__ bind(&done);
}
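Note: the core-register path above assembles an IEEE-754 double by hand: sign from kSignMask, exponent from the position of the highest set bit plus kExponentBias, and the mantissa split 20/32 across the two words. A minimal C++ sketch of the same layout, assuming GCC/Clang's __builtin_clz (hypothetical helper, not V8 code):

    #include <cstdint>

    // Build the two 32-bit words of double(value) the way the stub does:
    // sign | biased exponent | top 20 mantissa bits, then the low word.
    void IntToDoubleWords(int32_t value, uint32_t* mantissa_word,
                          uint32_t* exponent_word) {
      if (value == 0) { *mantissa_word = *exponent_word = 0; return; }
      uint32_t sign = value < 0 ? 0x80000000u : 0;           // kSignMask
      uint32_t u = value < 0 ? 0u - static_cast<uint32_t>(value)
                             : static_cast<uint32_t>(value);
      int pos = 31 - __builtin_clz(u);                       // highest set bit
      uint32_t exponent = static_cast<uint32_t>(pos + 1023); // kExponentBias
      u &= ~(1u << pos);                                     // drop implicit 1
      if (pos > 20) {  // kMantissaBitsInTopWord == 20
        *exponent_word = sign | (exponent << 20) | (u >> (pos - 20));
        *mantissa_word = u << (32 - (pos - 20));
      } else {
        *exponent_word = sign | (exponent << 20) | (u << (20 - pos));
        *mantissa_word = 0;
      }
    }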
@@ -826,8 +804,9 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
Register object,
Destination destination,
DwVfpRegister double_dst,
- Register dst1,
- Register dst2,
+ DwVfpRegister double_scratch,
+ Register dst_mantissa,
+ Register dst_exponent,
Register heap_number_map,
Register scratch1,
Register scratch2,
@@ -843,16 +822,14 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ JumpIfNotSmi(object, &obj_is_not_smi);
__ SmiUntag(scratch1, object);
- ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
- scratch2, single_scratch);
+ ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa,
+ dst_exponent, scratch2, single_scratch);
__ b(&done);
__ bind(&obj_is_not_smi);
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Load the number.
@@ -863,36 +840,62 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
__ EmitVFPTruncate(kRoundToZero,
- single_scratch,
- double_dst,
scratch1,
+ double_dst,
scratch2,
+ double_scratch,
kCheckForInexactConversion);
// Jump to not_int32 if the operation did not succeed.
__ b(ne, not_int32);
if (destination == kCoreRegisters) {
- __ vmov(dst1, dst2, double_dst);
+ __ vmov(dst_mantissa, dst_exponent, double_dst);
}
} else {
ASSERT(!scratch1.is(object) && !scratch2.is(object));
- // Load the double value in the destination registers..
- __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
+ // Load the double value in the destination registers.
+ bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent);
+ if (save_registers) {
+ // Save both output registers, because the other one probably holds
+ // an important value too.
+ __ Push(dst_exponent, dst_mantissa);
+ }
+ __ Ldrd(dst_mantissa, dst_exponent,
+ FieldMemOperand(object, HeapNumber::kValueOffset));
// Check for 0 and -0.
- __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
- __ orr(scratch1, scratch1, Operand(dst2));
+ Label zero;
+ __ bic(scratch1, dst_exponent, Operand(HeapNumber::kSignMask));
+ __ orr(scratch1, scratch1, Operand(dst_mantissa));
__ cmp(scratch1, Operand::Zero());
- __ b(eq, &done);
+ __ b(eq, &zero);
// Check that the value can be exactly represented by a 32-bit integer.
// Jump to not_int32 if that's not the case.
- DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
+ Label restore_input_and_miss;
+ DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2,
+ &restore_input_and_miss);
- // dst1 and dst2 were trashed. Reload the double value.
- __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
+ // dst_* were trashed. Reload the double value.
+ if (save_registers) {
+ __ Pop(dst_exponent, dst_mantissa);
+ }
+ __ Ldrd(dst_mantissa, dst_exponent,
+ FieldMemOperand(object, HeapNumber::kValueOffset));
+ __ b(&done);
+
+ __ bind(&restore_input_and_miss);
+ if (save_registers) {
+ __ Pop(dst_exponent, dst_mantissa);
+ }
+ __ b(not_int32);
+
+ __ bind(&zero);
+ if (save_registers) {
+ __ Drop(2);
+ }
}
__ bind(&done);
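Note: EmitVFPTruncate with kCheckForInexactConversion, as used above, only succeeds when the double is already an integer in int32 range. A semantic sketch under IEEE assumptions, not the actual VFP sequence:

    #include <cstdint>
    #include <cmath>

    // Truncate toward zero; fail when the conversion would be inexact or
    // out of int32 range, mirroring the ne-flag bailout to not_int32 above.
    bool TruncateDoubleToInt32Exact(double d, int32_t* out) {
      double t = std::trunc(d);
      if (t != d || t < -2147483648.0 || t > 2147483647.0) return false;
      *out = static_cast<int32_t>(t);
      return true;
    }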
@@ -906,7 +909,8 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3,
- DwVfpRegister double_scratch,
+ DwVfpRegister double_scratch0,
+ DwVfpRegister double_scratch1,
Label* not_int32) {
ASSERT(!dst.is(object));
ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
@@ -914,38 +918,34 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
!scratch1.is(scratch3) &&
!scratch2.is(scratch3));
- Label done;
+ Label done, maybe_undefined;
__ UntagAndJumpIfSmi(dst, object, &done);
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
// Object is a heap number.
// Convert the floating point value to a 32-bit integer.
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
- SwVfpRegister single_scratch = double_scratch.low();
+
// Load the double value.
__ sub(scratch1, object, Operand(kHeapObjectTag));
- __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset);
+ __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset);
__ EmitVFPTruncate(kRoundToZero,
- single_scratch,
- double_scratch,
+ dst,
+ double_scratch0,
scratch1,
- scratch2,
+ double_scratch1,
kCheckForInexactConversion);
// Jump to not_int32 if the operation did not succeed.
__ b(ne, not_int32);
- // Get the result in the destination register.
- __ vmov(dst, single_scratch);
-
} else {
// Load the double value in the destination registers.
__ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
@@ -973,20 +973,28 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
__ tst(scratch1, Operand(HeapNumber::kSignMask));
__ rsb(dst, dst, Operand::Zero(), LeaveCC, mi);
}
+ __ b(&done);
+
+ __ bind(&maybe_undefined);
+ __ CompareRoot(object, Heap::kUndefinedValueRootIndex);
+ __ b(ne, not_int32);
+ // |undefined| is truncated to 0.
+ __ mov(dst, Operand(Smi::FromInt(0)));
+ // Fall through.
__ bind(&done);
}
void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
- Register src1,
- Register src2,
+ Register src_exponent,
+ Register src_mantissa,
Register dst,
Register scratch,
Label* not_int32) {
// Get exponent alone in scratch.
__ Ubfx(scratch,
- src1,
+ src_exponent,
HeapNumber::kExponentShift,
HeapNumber::kExponentBits);
@@ -1006,11 +1014,11 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
// Another way to put it is that if (exponent - signbit) > 30 then the
// number cannot be represented as an int32.
Register tmp = dst;
- __ sub(tmp, scratch, Operand(src1, LSR, 31));
+ __ sub(tmp, scratch, Operand(src_exponent, LSR, 31));
__ cmp(tmp, Operand(30));
__ b(gt, not_int32);
// - Bits [21:0] in the mantissa are not null.
- __ tst(src2, Operand(0x3fffff));
+ __ tst(src_mantissa, Operand(0x3fffff));
__ b(ne, not_int32);
// Otherwise the exponent needs to be big enough to shift left all the
@@ -1021,19 +1029,19 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
// Get the 32 higher bits of the mantissa in dst.
__ Ubfx(dst,
- src2,
+ src_mantissa,
HeapNumber::kMantissaBitsInTopWord,
32 - HeapNumber::kMantissaBitsInTopWord);
__ orr(dst,
dst,
- Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+ Operand(src_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord));
// Create the mask and test the lower bits (of the higher bits).
__ rsb(scratch, scratch, Operand(32));
- __ mov(src2, Operand(1));
- __ mov(src1, Operand(src2, LSL, scratch));
- __ sub(src1, src1, Operand(1));
- __ tst(dst, src1);
+ __ mov(src_mantissa, Operand(1));
+ __ mov(src_exponent, Operand(src_mantissa, LSL, scratch));
+ __ sub(src_exponent, src_exponent, Operand(1));
+ __ tst(dst, src_exponent);
__ b(ne, not_int32);
}
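Note: the bit-level test above (biased exponent minus sign bit at most 30, then no mantissa bits shifted out) is equivalent to this semantic check; a sketch under IEEE assumptions, not the stub's register-friendly version:

    #include <cmath>

    // A double is a 32-bit integer iff it has no fractional part and lies
    // in [-2^31, 2^31 - 1]. (Zero and -0 are handled separately above.)
    bool DoubleIs32BitInteger(double d) {
      return std::floor(d) == d && d >= -2147483648.0 && d <= 2147483647.0;
    }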
@@ -1157,48 +1165,43 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cond,
- bool never_nan_nan) {
+ Condition cond) {
Label not_identical;
Label heap_number, return_equal;
__ cmp(r0, r1);
__ b(ne, &not_identical);
- // The two objects are identical. If we know that one of them isn't NaN then
- // we now know they test equal.
- if (cond != eq || !never_nan_nan) {
- // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
- // so we do the second best thing - test it ourselves.
- // They are both equal and they are not both Smis so both of them are not
- // Smis. If it's not a heap number, then return equal.
- if (cond == lt || cond == gt) {
- __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are both equal and they are not both Smis so both of them are not
+ // Smis. If it's not a heap number, then return equal.
+ if (cond == lt || cond == gt) {
+ __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, slow);
+ } else {
+ __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ b(eq, &heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cond != eq) {
+ __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
__ b(ge, slow);
- } else {
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
- __ b(eq, &heap_number);
- // Comparing JS objects with <=, >= is complicated.
- if (cond != eq) {
- __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ b(ge, slow);
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cond == le || cond == ge) {
- __ cmp(r4, Operand(ODDBALL_TYPE));
- __ b(ne, &return_equal);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r2);
- __ b(ne, &return_equal);
- if (cond == le) {
- // undefined <= undefined should fail.
- __ mov(r0, Operand(GREATER));
- } else {
- // undefined >= undefined should fail.
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cond == le || cond == ge) {
+ __ cmp(r4, Operand(ODDBALL_TYPE));
+ __ b(ne, &return_equal);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, r2);
+ __ b(ne, &return_equal);
+ if (cond == le) {
+ // undefined <= undefined should fail.
+ __ mov(r0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ mov(r0, Operand(LESS));
}
+ __ Ret();
}
}
}
@@ -1213,47 +1216,45 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
}
__ Ret();
- if (cond != eq || !never_nan_nan) {
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cond != lt && cond != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
-
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r3, Operand(-1));
- __ b(ne, &return_equal);
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
- // Or with all low-bits of mantissa.
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ orr(r0, r3, Operand(r2), SetCC);
- // For equal we already have the right value in r0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if
- // not (it's a NaN). For <= and >= we need to load r0 with the failing
- // value if it's a NaN.
- if (cond != eq) {
- // All-zero means Infinity means equal.
- __ Ret(eq);
- if (cond == le) {
- __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
- }
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others here is some code to check
+ // for NaN.
+ if (cond != lt && cond != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r3, Operand(-1));
+ __ b(ne, &return_equal);
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+ // Or with all low-bits of mantissa.
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ orr(r0, r3, Operand(r2), SetCC);
+ // For equal we already have the right value in r0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load r0 with the failing
+ // value if it's a NaN.
+ if (cond != eq) {
+ // All-zero means Infinity means equal.
+ __ Ret(eq);
+ if (cond == le) {
+ __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
}
- __ Ret();
}
- // No fall through here.
+ __ Ret();
}
+ // No fall through here.
__ bind(&not_identical);
}
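Note: the NaN test above works directly on the two words of the heap number: all eleven exponent bits set (hence the Sbfx result sign-extends to -1) and a nonzero mantissa, since an all-zero mantissa would be an Infinity. The same predicate in C++, as a sketch:

    #include <cstdint>
    #include <cstring>

    // NaN: exponent bits 52..62 all ones, mantissa bits 0..51 not all zero.
    bool IsNaNBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      uint64_t exponent = (bits >> 52) & 0x7FF;
      uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
      return exponent == 0x7FF && mantissa != 0;
    }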
@@ -1687,42 +1688,60 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
}
-// On entry lhs_ and rhs_ are the values to be compared.
+static void ICCompareStub_CheckInputType(MacroAssembler* masm,
+ Register input,
+ Register scratch,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::HEAP_NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+ DONT_DO_SMI_CHECK);
+ }
+ // We could be strict about symbol/string here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+}
+
+
+// On entry r1 and r2 are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
// the comparison.
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+ Register lhs = r1;
+ Register rhs = r0;
+ Condition cc = GetCondition();
+
+ Label miss;
+ ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss);
+ ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss);
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles, lhs_not_nan;
- if (include_smi_compare_) {
- Label not_two_smis, smi_done;
- __ orr(r2, r1, r0);
- __ JumpIfNotSmi(r2, &not_two_smis);
- __ mov(r1, Operand(r1, ASR, 1));
- __ sub(r0, r1, Operand(r0, ASR, 1));
- __ Ret();
- __ bind(&not_two_smis);
- } else if (FLAG_debug_code) {
- __ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, "CompareStub: unexpected smi operands.");
- }
+ Label not_two_smis, smi_done;
+ __ orr(r2, r1, r0);
+ __ JumpIfNotSmi(r2, &not_two_smis);
+ __ mov(r1, Operand(r1, ASR, 1));
+ __ sub(r0, r1, Operand(r0, ASR, 1));
+ __ Ret();
+ __ bind(&not_two_smis);
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
+ EmitIdenticalObjectComparison(masm, &slow, cc);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
- __ and_(r2, lhs_, Operand(rhs_));
+ __ and_(r2, lhs, Operand(rhs));
__ JumpIfNotSmi(r2, &not_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
// 1) Return the answer.
@@ -1733,7 +1752,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// comparison. If VFP3 is supported the double values of the numbers have
// been loaded into d7 and d6. Otherwise, the double values have been loaded
// into r0, r1, r2, and r3.
- EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
+ EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
__ bind(&both_loaded_as_doubles);
// The arguments have been converted to doubles and stored in d6 and d7, if
@@ -1756,7 +1775,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// If one of the sides was a NaN then the v flag is set. Load r0 with
// whatever it takes to make the comparison fail, since comparisons with NaN
// always fail.
- if (cc_ == lt || cc_ == le) {
+ if (cc == lt || cc == le) {
__ mov(r0, Operand(GREATER));
} else {
__ mov(r0, Operand(LESS));
@@ -1765,19 +1784,19 @@ void CompareStub::Generate(MacroAssembler* masm) {
} else {
// Checks for NaN in the doubles we have loaded. Can return the answer or
// fall through if neither is a NaN. Also binds lhs_not_nan.
- EmitNanCheck(masm, &lhs_not_nan, cc_);
+ EmitNanCheck(masm, &lhs_not_nan, cc);
// Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
// answer. Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc_);
+ EmitTwoNonNanDoubleComparison(masm, cc);
}
__ bind(&not_smis);
// At this point we know we are dealing with two different objects,
// and neither of them is a Smi. The objects are in rhs_ and lhs_.
- if (strict_) {
+ if (strict()) {
// This returns non-equal for some object types, or falls through if it
// was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
+ EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
}
Label check_for_symbols;
@@ -1787,8 +1806,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
// that case. If the inputs are not doubles then jumps to check_for_symbols.
// In this case r2 will contain the type of rhs_. Never falls through.
EmitCheckForTwoHeapNumbers(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
&both_loaded_as_doubles,
&check_for_symbols,
&flat_string_check);
@@ -1796,31 +1815,31 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&check_for_symbols);
// In the strict case the EmitStrictTwoHeapObjectCompare already took care of
// symbols.
- if (cc_ == eq && !strict_) {
+ if (cc == eq && !strict()) {
// Returns an answer for two symbols or two detectable objects.
// Otherwise jumps to string case or not both strings case.
// Assumes that r2 is the type of rhs_ on entry.
- EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
+ EmitCheckForSymbolsOrObjects(masm, lhs, rhs, &flat_string_check, &slow);
}
// Check for both being sequential ASCII strings, and inline if that is the
// case.
__ bind(&flat_string_check);
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);
__ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
- if (cc_ == eq) {
+ if (cc == eq) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
r2,
r3,
r4);
} else {
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
r2,
r3,
r4,
@@ -1830,18 +1849,18 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
- __ Push(lhs_, rhs_);
+ __ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
- if (cc_ == eq) {
- native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == eq) {
+ native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
native = Builtins::COMPARE;
int ncr; // NaN compare result
- if (cc_ == lt || cc_ == le) {
+ if (cc == lt || cc == le) {
ncr = GREATER;
} else {
- ASSERT(cc_ == gt || cc_ == ge); // remaining cases
+ ASSERT(cc == gt || cc == ge); // remaining cases
ncr = LESS;
}
__ mov(r0, Operand(Smi::FromInt(ncr)));
@@ -1851,6 +1870,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(native, JUMP_FUNCTION);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
@@ -2334,20 +2356,23 @@ void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
}
+void BinaryOpStub::Initialize() {
+ platform_specific_bit_ = CpuFeatures::IsSupported(VFP2);
+}
+
+
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
Label get_result;
__ Push(r1, r0);
__ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ mov(r1, Operand(Smi::FromInt(op_)));
- __ mov(r0, Operand(Smi::FromInt(operands_type_)));
- __ Push(r2, r1, r0);
+ __ push(r2);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 5,
+ 3,
1);
}
@@ -2358,59 +2383,8 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
}
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
- switch (operands_type_) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case BinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::BOTH_STRING:
- GenerateBothStringStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
-}
-
-
-void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
+void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
+ Token::Value op) {
Register left = r1;
Register right = r0;
Register scratch1 = r7;
@@ -2420,7 +2394,7 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
Label not_smi_result;
- switch (op_) {
+ switch (op) {
case Token::ADD:
__ add(right, left, Operand(right), SetCC); // Add optimistically.
__ Ret(vc);
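Note: the ADD case above adds the still-tagged values and returns on vc (no signed overflow); because the smi tag bit is zero, tagged addition directly yields the tagged sum. A one-line model, assuming GCC/Clang's __builtin_add_overflow:

    #include <cstdint>

    // Optimistic smi ADD: add tagged words, succeed unless signed overflow,
    // mirroring "add right, left, right, SetCC; Ret(vc)".
    bool SmiAddTagged(int32_t a_tagged, int32_t b_tagged, int32_t* result) {
      return !__builtin_add_overflow(a_tagged, b_tagged, result);
    }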
@@ -2535,10 +2509,24 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
}
-void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required) {
+void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ OverwriteMode mode);
+
+
+void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
+ BinaryOpIC::TypeInfo left_type,
+ BinaryOpIC::TypeInfo right_type,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required,
+ Label* miss,
+ Token::Value op,
+ OverwriteMode mode) {
Register left = r1;
Register right = r0;
Register scratch1 = r7;
@@ -2546,15 +2534,21 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
Register scratch3 = r4;
ASSERT(smi_operands || (not_numbers != NULL));
- if (smi_operands && FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
+ if (smi_operands) {
+ __ AssertSmi(left);
+ __ AssertSmi(right);
+ }
+ if (left_type == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(left, miss);
+ }
+ if (right_type == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(right, miss);
}
Register heap_number_map = r6;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- switch (op_) {
+ switch (op) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
@@ -2564,25 +2558,44 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// depending on whether VFP3 is available or not.
FloatingPointHelper::Destination destination =
CpuFeatures::IsSupported(VFP2) &&
- op_ != Token::MOD ?
+ op != Token::MOD ?
FloatingPointHelper::kVFPRegisters :
FloatingPointHelper::kCoreRegisters;
// Allocate new heap number for result.
Register result = r5;
- GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
// Load the operands.
if (smi_operands) {
FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
} else {
- FloatingPointHelper::LoadOperands(masm,
- destination,
- heap_number_map,
- scratch1,
- scratch2,
- not_numbers);
+ // Load right operand to d7 or r2/r3.
+ if (right_type == BinaryOpIC::INT32) {
+ FloatingPointHelper::LoadNumberAsInt32Double(
+ masm, right, destination, d7, d8, r2, r3, heap_number_map,
+ scratch1, scratch2, s0, miss);
+ } else {
+ Label* fail = (right_type == BinaryOpIC::HEAP_NUMBER) ? miss
+ : not_numbers;
+ FloatingPointHelper::LoadNumber(
+ masm, destination, right, d7, r2, r3, heap_number_map,
+ scratch1, scratch2, fail);
+ }
+ // Load left operand to d6 or r0/r1. This keeps r0/r1 intact if it
+ // jumps to |miss|.
+ if (left_type == BinaryOpIC::INT32) {
+ FloatingPointHelper::LoadNumberAsInt32Double(
+ masm, left, destination, d6, d8, r0, r1, heap_number_map,
+ scratch1, scratch2, s0, miss);
+ } else {
+ Label* fail = (left_type == BinaryOpIC::HEAP_NUMBER) ? miss
+ : not_numbers;
+ FloatingPointHelper::LoadNumber(
+ masm, destination, left, d6, r0, r1, heap_number_map,
+ scratch1, scratch2, fail);
+ }
}
// Calculate the result.
@@ -2591,7 +2604,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// d6: Left value
// d7: Right value
CpuFeatures::Scope scope(VFP2);
- switch (op_) {
+ switch (op) {
case Token::ADD:
__ vadd(d5, d6, d7);
break;
@@ -2615,7 +2628,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
} else {
// Call the C function to handle the double operation.
FloatingPointHelper::CallCCodeForDoubleOperation(masm,
- op_,
+ op,
result,
scratch1);
if (FLAG_debug_code) {
@@ -2656,7 +2669,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
}
Label result_not_a_smi;
- switch (op_) {
+ switch (op) {
case Token::BIT_OR:
__ orr(r2, r3, Operand(r2));
break;
@@ -2707,8 +2720,9 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
} else {
- GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required,
+ mode);
}
// r2: Answer as signed int32.
@@ -2723,7 +2737,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// mentioned above SHR needs to always produce a positive result.
CpuFeatures::Scope scope(VFP2);
__ vmov(s0, r2);
- if (op_ == Token::SHR) {
+ if (op == Token::SHR) {
__ vcvt_f64_u32(d0, s0);
} else {
__ vcvt_f64_s32(d0, s0);
@@ -2748,12 +2762,14 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// Generate the smi code. If the operation on smis is successful this return is
// generated. If the result is not a smi and heap number allocation is not
// requested the code falls through. If number allocation is requested but a
-// heap number cannot be allocated the code jumps to the lable gc_required.
-void BinaryOpStub::GenerateSmiCode(
+// heap number cannot be allocated the code jumps to the label gc_required.
+void BinaryOpStub_GenerateSmiCode(
MacroAssembler* masm,
Label* use_runtime,
Label* gc_required,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ Token::Value op,
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+ OverwriteMode mode) {
Label not_smis;
Register left = r1;
@@ -2766,12 +2782,14 @@ void BinaryOpStub::GenerateSmiCode(
__ JumpIfNotSmi(scratch1, &not_smis);
// If the smi-smi operation results in a smi return is generated.
- GenerateSmiSmiOperation(masm);
+ BinaryOpStub_GenerateSmiSmiOperation(masm, op);
// If heap number results are possible generate the result in an allocated
// heap number.
- if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
- GenerateFPOperation(masm, true, use_runtime, gc_required);
+ if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
+ BinaryOpStub_GenerateFPOperation(
+ masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
+ use_runtime, gc_required, &not_smis, op, mode);
}
__ bind(&not_smis);
}
@@ -2783,14 +2801,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
// Only allow smi results.
- GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
} else {
// Allow heap number result and don't make a transition if a heap number
// cannot be allocated.
- GenerateSmiCode(masm,
- &call_runtime,
- &call_runtime,
- ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
+ mode_);
}
// Code falls through if the result is not returned as either a smi or heap
@@ -2798,23 +2816,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
GenerateTypeTransition(masm);
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
-}
-
-
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
// If both arguments are strings, call the string add stub.
// Otherwise, do a transition.
@@ -2843,14 +2852,13 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::INT32);
+ ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
Register left = r1;
Register right = r0;
Register scratch1 = r7;
Register scratch2 = r9;
DwVfpRegister double_scratch = d0;
- SwVfpRegister single_scratch = s3;
Register heap_number_result = no_reg;
Register heap_number_map = r6;
@@ -2866,7 +2874,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label skip;
__ orr(scratch1, left, right);
__ JumpIfNotSmi(scratch1, &skip);
- GenerateSmiSmiOperation(masm);
+ BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
// Fall through if the result is not a smi.
__ bind(&skip);
@@ -2876,6 +2884,15 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
case Token::MUL:
case Token::DIV:
case Token::MOD: {
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ if (left_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(left, &transition);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(right, &transition);
+ }
// Load both operands and check that they are 32-bit integer.
// Jump to type transition if they are not. The registers r0 and r1 (right
// and left) are preserved for the runtime call.
@@ -2888,6 +2905,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
right,
destination,
d7,
+ d8,
r2,
r3,
heap_number_map,
@@ -2899,6 +2917,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
left,
destination,
d6,
+ d8,
r4,
r5,
heap_number_map,
@@ -2934,10 +2953,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// transition.
__ EmitVFPTruncate(kRoundToZero,
- single_scratch,
- d5,
scratch1,
- scratch2);
+ d5,
+ scratch2,
+ d8);
if (result_type_ <= BinaryOpIC::INT32) {
// If the ne condition is set, result does
@@ -2946,7 +2965,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
}
// Check if the result fits in a smi.
- __ vmov(scratch1, single_scratch);
__ add(scratch2, scratch1, Operand(0x40000000), SetCC);
// If not try to return a heap number.
__ b(mi, &return_heap_number);
@@ -2973,12 +2991,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
: BinaryOpIC::INT32)) {
// We are using vfp registers so r5 is available.
heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime,
+ mode_);
__ sub(r0, heap_number_result, Operand(kHeapObjectTag));
__ vstr(d5, r0, HeapNumber::kValueOffset);
__ mov(r0, heap_number_result);
@@ -2997,12 +3016,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Allocate a heap number to store the result.
heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &pop_and_call_runtime,
+ mode_);
// Load the left value from the value saved on the stack.
__ Pop(r1, r0);
@@ -3041,6 +3061,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
scratch2,
scratch3,
d0,
+ d1,
&transition);
FloatingPointHelper::LoadNumberAsInt32(masm,
right,
@@ -3050,6 +3071,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
scratch2,
scratch3,
d0,
+ d1,
&transition);
// The ECMA-262 standard specifies that, for shift operations, only the
@@ -3105,12 +3127,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ bind(&return_heap_number);
heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime,
+ mode_);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
@@ -3154,6 +3177,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
}
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
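Note: GenerateInt32Stub above asserts Max(left_type_, right_type_) == BinaryOpIC::INT32 and jumps to the transition label to re-patch the IC when an operand outgrows its recorded type, so the feedback states form a lattice whose join is the per-operand maximum. A sketch with illustrative values (the authoritative ordering lives in BinaryOpIC):

    enum TypeInfo { UNINITIALIZED, SMI, INT32, HEAP_NUMBER, ODDBALL,
                    STRING, GENERIC };

    TypeInfo Combine(TypeInfo left, TypeInfo right) {
      return left > right ? left : right;  // lattice join = max
    }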
@@ -3192,20 +3216,32 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- Label call_runtime;
- GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
+ Label call_runtime, transition;
+ BinaryOpStub_GenerateFPOperation(
+ masm, left_type_, right_type_, false,
+ &transition, &call_runtime, &transition, op_, mode_);
+
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime;
+ Label call_runtime, call_string_add_or_runtime, transition;
- GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
- GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
+ BinaryOpStub_GenerateFPOperation(
+ masm, left_type_, right_type_, false,
+ &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
+
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
__ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
@@ -3213,6 +3249,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
}
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
@@ -3248,61 +3285,20 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
}
-void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
- GenerateRegisterArgsPush(masm);
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ OverwriteMode mode) {
// Code below will scratch result if allocation fails. To keep both arguments
// intact for the runtime call result cannot be one of these.
ASSERT(!result.is(r0) && !result.is(r1));
- if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
+ if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
Label skip_allocation, allocated;
- Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
+ Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0;
// If the overwritable operand is already an object, we skip the
// allocation of a heap number.
__ JumpIfNotSmi(overwritable_operand, &skip_allocation);
@@ -3315,7 +3311,7 @@ void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
__ mov(result, Operand(overwritable_operand));
__ bind(&allocated);
} else {
- ASSERT(mode_ == NO_OVERWRITE);
+ ASSERT(mode == NO_OVERWRITE);
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
}
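Note: BinaryOpStub_GenerateHeapResultAllocation above reuses the overwritable operand's heap number when possible and only allocates otherwise. A hypothetical C++ model of that policy (names and types are illustrative, not V8's):

    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

    struct Value { bool is_smi; double* heap_box; };  // box for heap numbers

    // Returns storage for the result: a reused operand box when the mode
    // allows it and that operand is not a smi, else a fresh allocation (the
    // stand-in for AllocateHeapNumber with its gc_required bailout).
    double* ResultBox(OverwriteMode mode, Value left, Value right) {
      if (mode == OVERWRITE_LEFT && !left.is_smi) return left.heap_box;
      if (mode == OVERWRITE_RIGHT && !right.is_smi) return right.heap_box;
      return new double(0.0);
    }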
@@ -3444,8 +3440,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
ExternalReference(RuntimeFunction(), masm->isolate());
__ TailCallExternalReference(runtime_function, 1, 1);
} else {
- ASSERT(CpuFeatures::IsSupported(VFP3));
- CpuFeatures::Scope scope(VFP3);
+ ASSERT(CpuFeatures::IsSupported(VFP2));
+ CpuFeatures::Scope scope(VFP2);
Label no_update;
Label skip_cache;
@@ -3636,13 +3632,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label not_plus_half;
// Test for 0.5.
- __ vmov(double_scratch, 0.5);
+ __ vmov(double_scratch, 0.5, scratch);
__ VFPCompareAndSetFlags(double_exponent, double_scratch);
__ b(ne, &not_plus_half);
// Calculates square root of base. Check for the special case of
// Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
- __ vmov(double_scratch, -V8_INFINITY);
+ __ vmov(double_scratch, -V8_INFINITY, scratch);
__ VFPCompareAndSetFlags(double_base, double_scratch);
__ vneg(double_result, double_scratch, eq);
__ b(eq, &done);
@@ -3653,20 +3649,20 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&done);
__ bind(&not_plus_half);
- __ vmov(double_scratch, -0.5);
+ __ vmov(double_scratch, -0.5, scratch);
__ VFPCompareAndSetFlags(double_exponent, double_scratch);
__ b(ne, &call_runtime);
// Calculates square root of base. Check for the special case of
// Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
- __ vmov(double_scratch, -V8_INFINITY);
+ __ vmov(double_scratch, -V8_INFINITY, scratch);
__ VFPCompareAndSetFlags(double_base, double_scratch);
__ vmov(double_result, kDoubleRegZero, eq);
__ b(eq, &done);
// Add +0 to convert -0 to +0.
__ vadd(double_scratch, double_base, kDoubleRegZero);
- __ vmov(double_result, 1.0);
+ __ vmov(double_result, 1.0, scratch);
__ vsqrt(double_scratch, double_scratch);
__ vdiv(double_result, double_result, double_scratch);
__ jmp(&done);
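Note: the ±0.5 fast paths above cannot use a bare square root: ECMA-262 15.8.2.13 requires pow(-Infinity, 0.5) == +Infinity and pow(-Infinity, -0.5) == +0, and adding +0 first turns a -0 base into +0 before vsqrt. A sketch of the same semantics, assuming IEEE arithmetic:

    #include <cmath>
    #include <limits>

    double PowHalf(double base, double exponent) {
      const double kInf = std::numeric_limits<double>::infinity();
      if (exponent == 0.5) {
        if (base == -kInf) return kInf;   // sqrt(-inf) would be NaN
        return std::sqrt(base + 0.0);     // -0 + +0 == +0, so the root is +0
      }
      if (exponent == -0.5) {
        if (base == -kInf) return 0.0;
        return 1.0 / std::sqrt(base + 0.0);
      }
      return std::pow(base, exponent);    // analogue of the stub's slow path
    }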
@@ -3701,7 +3697,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ mov(exponent, scratch);
}
__ vmov(double_scratch, double_base); // Back up base.
- __ vmov(double_result, 1.0);
+ __ vmov(double_result, 1.0, scratch2);
// Get absolute value of exponent.
__ cmp(scratch, Operand(0));
@@ -3717,7 +3713,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cmp(exponent, Operand(0));
__ b(ge, &done);
- __ vmov(double_scratch, 1.0);
+ __ vmov(double_scratch, 1.0, scratch);
__ vdiv(double_result, double_scratch, double_result);
// Test whether result is zero. Bail out to check for subnormal result.
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
@@ -4930,7 +4926,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// r0: Instance type of subject string
- STATIC_ASSERT(4 == kAsciiStringTag);
+ STATIC_ASSERT(4 == kOneByteStringTag);
STATIC_ASSERT(kTwoByteStringTag == 0);
// Find the code object based on the assumptions above.
__ and_(r0, r0, Operand(kStringEncodingMask));
@@ -5154,7 +5150,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(subject,
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ sub(subject,
subject,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@@ -5232,12 +5228,12 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set FixedArray length.
__ mov(r6, Operand(r5, LSL, kSmiTagSize));
__ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
- // Fill contents of fixed-array with the-hole.
- __ mov(r2, Operand(factory->the_hole_value()));
+ // Fill contents of fixed-array with undefined.
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // Fill fixed array elements with hole.
+ // Fill fixed array elements with undefined.
// r0: JSArray, tagged.
- // r2: the hole.
+ // r2: undefined.
// r3: Start of elements in FixedArray.
// r5: Number of elements to fill.
Label loop;
@@ -5432,48 +5428,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
}
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-void CompareStub::PrintName(StringStream* stream) {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- const char* cc_name;
- switch (cc_) {
- case lt: cc_name = "LT"; break;
- case gt: cc_name = "GT"; break;
- case le: cc_name = "LE"; break;
- case ge: cc_name = "GE"; break;
- case eq: cc_name = "EQ"; break;
- case ne: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
- bool is_equality = cc_ == eq || cc_ == ne;
- stream->Add("CompareStub_%s", cc_name);
- stream->Add(lhs_.is(r0) ? "_r0" : "_r1");
- stream->Add(rhs_.is(r0) ? "_r0" : "_r1");
- if (strict_ && is_equality) stream->Add("_STRICT");
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
- if (!include_number_compare_) stream->Add("_NO_NUMBER");
- if (!include_smi_compare_) stream->Add("_NO_SMI");
-}
-
-
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs the never NaN NaN condition is only taken into account if the
- // condition is equals.
- ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
- | RegisterField::encode(lhs_.is(r0))
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_)
- | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Label flat_string;
@@ -5923,7 +5877,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Check if the two characters match.
// Assumes that word load is little endian.
- __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ ldrh(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
__ cmp(chars, scratch);
__ b(eq, &found_in_symbol_table);
__ bind(&next_probe[i]);
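Note: the probe above compares two characters at once: the pair is packed into one halfword (little-endian, first character in the low byte) so a single ldrh load and compare tests both. A C++ sketch of the trick (hypothetical helper, little-endian host assumed):

    #include <cstdint>
    #include <cstring>

    // True iff str[0] == c1 and str[1] == c2, tested with one 16-bit load.
    bool TwoCharsMatch(const char* str, char c1, char c2) {
      uint16_t packed = static_cast<uint8_t>(c1) |
                        (static_cast<uint16_t>(static_cast<uint8_t>(c2)) << 8);
      uint16_t loaded;
      std::memcpy(&loaded, str, 2);  // like ldrh on little-endian ARM
      return loaded == packed;
    }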
@@ -6006,23 +5960,28 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- // I.e., arithmetic shift right by one un-smi-tags.
- __ mov(r2, Operand(r2, ASR, 1), SetCC);
- __ mov(r3, Operand(r3, ASR, 1), SetCC, cc);
- // If either to or from had the smi tag bit set, then carry is set now.
- __ b(cs, &runtime); // Either "from" or "to" is not a smi.
+ // Arithmetic shift right by one un-smi-tags. In this case we rotate right
+ // instead because we bail out on non-smi values: ROR and ASR are equivalent
+ // for smis but they set the flags in a way that's easier to optimize.
+ __ mov(r2, Operand(r2, ROR, 1), SetCC);
+ __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
+ // If either to or from had the smi tag bit set, then C is set now, and N
+ // has the same value: we rotated by 1, so the bottom bit is now the top bit.
// We want to bailout to runtime here if From is negative. In that case, the
// next instruction is not executed and we fall through to bailing out to
- // runtime. pl is the opposite of mi.
- // Both r2 and r3 are untagged integers.
- __ sub(r2, r2, Operand(r3), SetCC, pl);
- __ b(mi, &runtime); // Fail if from > to.
+ // runtime.
+ // Executed if both r2 and r3 are untagged integers.
+ __ sub(r2, r2, Operand(r3), SetCC, cc);
+ // One of the above un-smis or the above SUB could have set N==1.
+ __ b(mi, &runtime); // Either "from" or "to" is not an smi, or from > to.
// Make sure first argument is a string.
__ ldr(r0, MemOperand(sp, kStringOffset));
STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(r0, &runtime);
- Condition is_string = masm->IsObjectStringType(r0, r1);
+ // Do a JumpIfSmi, but fold its jump into the subsequent string test.
+ __ tst(r0, Operand(kSmiTagMask));
+ Condition is_string = masm->IsObjectStringType(r0, r1, ne);
+ ASSERT(is_string == eq);
__ b(NegateCondition(is_string), &runtime);
// Short-cut for the case of trivial substring.
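Note, for context on the ROR trick above: a 32-bit smi is the value shifted left by one with tag bit 0 clear, so an arithmetic shift right by one untags it; rotating instead parks the tag bit in the sign position as well as the carry, letting one mi branch catch both a set tag bit and from > to. A minimal model of the tagging scheme (flag behavior is an ARM detail not shown):

    #include <cstdint>

    inline bool IsSmi(int32_t tagged) { return (tagged & 1) == 0; }  // kSmiTag
    inline int32_t TagSmi(int32_t value) { return value << 1; }
    inline int32_t UntagSmi(int32_t tagged) { return tagged >> 1; }  // ASR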
@@ -6093,7 +6052,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// string's encoding is wrong because we always have to recheck encoding of
// the newly created string's parent anyways due to externalized strings.
Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ tst(r1, Operand(kStringEncodingMask));
__ b(eq, &two_byte_slice);
@@ -6131,12 +6090,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&sequential_string);
// Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
- __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ bind(&allocate_result);
// Sequential ascii string. Allocate the result.
- STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
__ tst(r1, Operand(kStringEncodingMask));
__ b(eq, &two_byte_sequential);
@@ -6146,13 +6105,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Locate first character of substring to copy.
__ add(r5, r5, r3);
// Locate first character of result.
- __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// r0: result string
// r1: first character of result string
// r2: result string length
// r5: first character of substring to copy
- STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ jmp(&return_r0);
@@ -6277,7 +6236,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// doesn't need an additional compare.
__ SmiUntag(length);
__ add(scratch1, length,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ add(left, left, Operand(scratch1));
__ add(right, right, Operand(scratch1));
__ rsb(length, length, Operand::Zero());
@@ -6430,8 +6389,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
&call_runtime);
// Get the two characters forming the sub string.
- __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
+ __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
+ __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize));
// Try to lookup two character string in symbol table. If it is not found
// just allocate a new one.
@@ -6450,7 +6409,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// in a little endian mode)
__ mov(r6, Operand(2));
__ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
+ __ strh(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -6500,11 +6459,6 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ tst(r4, Operand(kAsciiDataHintMask));
__ tst(r5, Operand(kAsciiDataHintMask), ne);
__ b(ne, &ascii_data);
- __ eor(r4, r4, Operand(r5));
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
- __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
- __ b(eq, &ascii_data);
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime);
@@ -6537,10 +6491,10 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r4, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ add(r7,
r0,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
LeaveCC,
eq);
__ b(eq, &first_prepared);
@@ -6553,10 +6507,10 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r5, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ add(r1,
r1,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
LeaveCC,
eq);
__ b(eq, &second_prepared);
@@ -6579,7 +6533,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ b(eq, &non_ascii_string_add_flat_result);
__ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// r0: result string.
// r7: first character of first string.
// r1: first character of second string.
@@ -6670,7 +6624,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
+ ASSERT(state_ == CompareIC::SMI);
Label miss;
__ orr(r2, r1, r0);
__ JumpIfNotSmi(r2, &miss);
@@ -6691,31 +6645,53 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+ ASSERT(state_ == CompareIC::HEAP_NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- __ and_(r2, r1, Operand(r0));
- __ JumpIfSmi(r2, &generic_stub);
- __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &maybe_undefined1);
- __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &maybe_undefined2);
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(r1, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(r0, &miss);
+ }
// Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or VFP3 is unsupported.
+ // stub if NaN is involved or VFP2 is unsupported.
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
- // Load left and right operand
- __ sub(r2, r1, Operand(kHeapObjectTag));
- __ vldr(d0, r2, HeapNumber::kValueOffset);
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(r0, &right_smi);
+ __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
__ sub(r2, r0, Operand(kHeapObjectTag));
__ vldr(d1, r2, HeapNumber::kValueOffset);
+ __ b(&left);
+ __ bind(&right_smi);
+ __ SmiUntag(r2, r0); // Can't clobber r0 yet.
+ SwVfpRegister single_scratch = d2.low();
+ __ vmov(single_scratch, r2);
+ __ vcvt_f64_s32(d1, single_scratch);
+
+ __ bind(&left);
+ __ JumpIfSmi(r1, &left_smi);
+ __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ sub(r2, r1, Operand(kHeapObjectTag));
+ __ vldr(d0, r2, HeapNumber::kValueOffset);
+ __ b(&done);
+ __ bind(&left_smi);
+ __ SmiUntag(r2, r1); // Can't clobber r1 yet.
+ single_scratch = d3.low();
+ __ vmov(single_scratch, r2);
+ __ vcvt_f64_s32(d0, single_scratch);
- // Compare operands
+ __ bind(&done);
+ // Compare operands.
__ VFPCompareAndSetFlags(d0, d1);
// Don't base result on status bits when a NaN is involved.
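
The rewritten IC accepts a smi on either side: a smi operand is untagged and converted with vcvt_f64_s32, a heap number is loaded from its value slot, and the VFP compare then treats any NaN as unordered. A sketch of the semantics the emitted code computes:

    #include <cmath>
    #include <cstdio>

    // Each operand is either a small integer widened to double or a heap
    // number's double payload; NaN forces the "unordered" outcome.
    enum Result { kLess, kEqual, kGreater, kUnordered };

    Result CompareAsDoubles(double left, double right) {
      if (std::isnan(left) || std::isnan(right)) return kUnordered;
      if (left < right) return kLess;
      if (left > right) return kGreater;
      return kEqual;
    }

    int main() {
      printf("%d\n", CompareAsDoubles(1.0, std::nan("")));  // 3: kUnordered
    }
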
@@ -6729,14 +6705,16 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}
__ bind(&unordered);
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
__ bind(&generic_stub);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(ne, &miss);
+ __ JumpIfSmi(r1, &unordered);
__ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
__ b(ne, &maybe_undefined2);
__ jmp(&unordered);
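
The maybe_undefined path exists because, for ordered relational operators, JavaScript converts undefined to NaN, and every ordered comparison involving NaN is false; hence the jump to &unordered. A short illustration of those semantics:

    #include <cmath>
    #include <cstdio>

    int main() {
      double undefined_as_number = std::nan("");   // ToNumber(undefined)
      printf("%d\n", undefined_as_number < 1.0);   // 0: NaN < x is false
      printf("%d\n", undefined_as_number >= 1.0);  // 0: so is NaN >= x
    }
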
@@ -6754,7 +6732,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOLS);
+ ASSERT(state_ == CompareIC::SYMBOL);
Label miss;
// Registers containing left and right operands respectively.
@@ -6792,7 +6770,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(state_ == CompareIC::STRING);
Label miss;
bool equality = Token::IsEqualityOp(op_);
@@ -6870,7 +6848,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
+ ASSERT(state_ == CompareIC::OBJECT);
Label miss;
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &miss);
@@ -7064,8 +7042,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
ASSERT(!name.is(scratch1));
ASSERT(!name.is(scratch2));
- // Assert that name contains a string.
- if (FLAG_debug_code) __ AbortIfNotString(name);
+ __ AssertString(name);
// Compute the capacity mask.
__ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
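
The capacity mask loaded here works because dictionary capacities are powers of two, so "hash & (capacity - 1)" replaces a modulo when probing. A hedged sketch of such a probe sequence; the triangular offsets below are an illustrative open-addressing choice, not necessarily the stub's exact scheme:

    #include <cstdint>
    #include <cstdio>

    // Power-of-two capacity lets the mask stand in for "hash % capacity".
    uint32_t ProbeIndex(uint32_t hash, uint32_t capacity, uint32_t attempt) {
      uint32_t mask = capacity - 1;                      // the capacity mask
      uint32_t offset = (attempt + attempt * attempt) >> 1;  // assumed scheme
      return (hash + offset) & mask;
    }

    int main() {
      for (uint32_t i = 0; i < 4; i++)
        printf("%u\n", ProbeIndex(0x9e3779b9u, 64, i));
    }
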
@@ -7262,6 +7239,7 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
#undef REG
+
bool RecordWriteStub::IsPregenerated() {
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
@@ -7303,6 +7281,11 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
}
+bool CodeStub::CanUseFPRegisters() {
+ return CpuFeatures::IsSupported(VFP2);
+}
+
+
// Takes the input in 3 registers: address_, value_, and object_. A pointer to
// the value has just been written into the object; now this stub makes sure
// we keep the GC informed. The word in the object where the value has been
@@ -7398,12 +7381,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
ASSERT(!address.is(r0));
__ Move(address, regs_.address());
__ Move(r0, regs_.object());
- if (mode == INCREMENTAL_COMPACTION) {
- __ Move(r1, address);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ ldr(r1, MemOperand(address, 0));
- }
+ __ Move(r1, address);
__ mov(r2, Operand(ExternalReference::isolate_address()));
AllowExternalCallThatCantCauseGC scope(masm);
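
After this simplification both incremental modes pass the slot address itself in r1 (with the object in r0 and the isolate in r2), since the marker can read the freshly written value through the slot. A hedged stand-in for the C-side callee; the name and signature here are assumptions for illustration, not V8's actual declaration:

    #include <cstdio>

    struct Isolate;  // opaque stand-in

    // Hypothetical callee showing why the slot address suffices: the value
    // just written is reachable through the slot itself.
    void RecordWriteFromCode(void* object, void** slot, Isolate* isolate) {
      (void)isolate;
      printf("object=%p value=%p\n", object, *slot);
    }

    int main() {
      void* object[1];                            // fake object with one slot
      object[0] = reinterpret_cast<void*>(0x42);  // the value just written
      RecordWriteFromCode(object, &object[0], nullptr);
    }
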
@@ -7431,6 +7409,16 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental;
Label need_incremental_pop_scratch;
+ __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
+ __ ldr(regs_.scratch1(),
+ MemOperand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
+ __ str(regs_.scratch1(),
+ MemOperand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ b(mi, &need_incremental);
+
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
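
The added block amortizes the marking checks: the page header is found by masking the object address (pages are aligned), a per-page counter at kWriteBarrierCounterOffset is decremented, and only when it underflows (the mi branch) does the stub take the incremental path. A sketch with an illustrative page size and field layout, not V8's real MemoryChunk:

    #include <cstdint>
    #include <cstdio>

    const uintptr_t kPageAlignmentMask = (1u << 12) - 1;  // assumed 4KB pages

    struct MemoryChunk {          // stand-in header with just the counter
      intptr_t write_barrier_counter;
    };

    bool NeedsIncrementalStep(uintptr_t object_addr) {
      MemoryChunk* page =
          reinterpret_cast<MemoryChunk*>(object_addr & ~kPageAlignmentMask);
      // Mirrors "sub ..., Operand(1), SetCC; b(mi, &need_incremental)".
      return --page->write_barrier_counter < 0;
    }

    int main() {
      alignas(1 << 12) static MemoryChunk page = {2};  // fake aligned "page"
      uintptr_t object = reinterpret_cast<uintptr_t>(&page) + 0x10;
      printf("%d\n", NeedsIncrementalStep(object));  // 2 -> 1: 0
      printf("%d\n", NeedsIncrementalStep(object));  // 1 -> 0: 0
      printf("%d\n", NeedsIncrementalStep(object));  // 0 -> -1: 1
    }
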
@@ -7551,7 +7539,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r2,
+ __ StoreNumberToDoubleElements(r0, r3,
+ // Overwrites all regs after this.
+ r5, r6, r7, r9, r2,
&slow_elements);
__ Ret();
}
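
StoreNumberToDoubleElements stores a number into a FAST_DOUBLE_ELEMENTS backing store, bailing out to &slow_elements for anything that is not a smi or heap number. A stand-in sketch of that contract; the types here are illustrative, not V8's:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Value { bool is_smi; bool is_heap_number; int32_t smi; double boxed; };

    // Smis are widened to double, heap numbers copied through; anything
    // else returns false so the caller can take the slow path.
    bool StoreNumberToDoubleElements(const Value& v, size_t index,
                                     std::vector<double>* elements) {
      if (v.is_smi) { (*elements)[index] = v.smi; return true; }
      if (v.is_heap_number) { (*elements)[index] = v.boxed; return true; }
      return false;
    }

    int main() {
      std::vector<double> backing(4, 0.0);
      StoreNumberToDoubleElements({true, false, 7, 0.0}, 1, &backing);
      printf("%g\n", backing[1]);  // prints "7"
    }
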
@@ -7559,6 +7549,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (entry_hook_ != NULL) {
+ PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize);
ProfileEntryHookStub stub;
__ push(lr);
__ CallStub(&stub);
@@ -7570,7 +7561,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// The entry hook is a "push lr" instruction, followed by a call.
const int32_t kReturnAddressDistanceFromFunctionStart =
- Assembler::kCallTargetAddressOffset + Assembler::kInstrSize;
+ 3 * Assembler::kInstrSize;
// Save live volatile registers.
__ Push(lr, r5, r1);
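
The new constant relies on a fixed-size prologue: push lr plus a two-instruction call leaves the hook's return address exactly three instructions past the function start, and the PredictableCodeSizeScope above pins the whole sequence at four instructions so the constant stays valid. A sketch of the recovery arithmetic, with a hypothetical code address:

    #include <cstdint>
    #include <cstdio>

    const int kInstrSize = 4;  // ARM instruction width
    const int kReturnAddressDistanceFromFunctionStart = 3 * kInstrSize;

    // The hook subtracts the fixed distance from its own return address to
    // recover the entry of the instrumented function.
    uintptr_t FunctionStartFromReturnAddress(uintptr_t lr) {
      return lr - kReturnAddressDistanceFromFunctionStart;
    }

    int main() {
      uintptr_t function_start = 0x1000;  // hypothetical address
      uintptr_t lr = function_start + 3 * kInstrSize;  // push + 2-instr call
      printf("0x%lx\n", static_cast<unsigned long>(
                            FunctionStartFromReturnAddress(lr)));  // 0x1000
    }
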