Diffstat (limited to 'deps/v8/src/mips/macro-assembler-mips.cc')
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc  843
1 file changed, 387 insertions, 456 deletions
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 4a5a386fa0..3c866ac453 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -19,12 +19,13 @@
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
has_frame_(false),
has_double_zero_reg_set_(false) {
- if (isolate() != NULL) {
+ if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
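
The new CodeObjectRequired argument replaces the NULL-isolate test as the way to opt out of creating the code object handle; a sketch of the two kinds of call sites (buffer and size are whatever the caller already has, and the kNo form matches the CodePatcher change at the end of this diff):

  MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kYes);
  MacroAssembler patcher(isolate, address, patch_size, CodeObjectRequired::kNo);
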
@@ -433,10 +434,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
#endif
// Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- lw(scratch, FieldMemOperand(scratch, offset));
- lw(scratch, FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
+ lw(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -1054,6 +1052,19 @@ void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
}
+void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
+ Register scratch) {
+ if (IsMipsArchVariant(kMips32r6) && sa <= 4) {
+ lsa(rd, rt, rs, sa);
+ } else {
+ Register tmp = rd.is(rt) ? scratch : rd;
+ DCHECK(!tmp.is(rt));
+ sll(tmp, rs, sa);
+ Addu(rd, rt, tmp);
+ }
+}
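
Lsa(rd, rt, rs, sa) is a shifted add, rd = rt + (rs << sa), the usual base + index * 2^sa address computation; on r6 it folds into a single lsa instruction, elsewhere into sll plus Addu through a temporary that must not alias rt. A rough C++ model of the value it produces (not V8 code):

  unsigned LsaModel(unsigned rt_base, unsigned rs_index, unsigned sa) {
    return rt_base + (rs_index << sa);  // what lsa or sll+Addu leave in rd
  }
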
+
+
// ------------Pseudo-instructions-------------
void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
@@ -1267,50 +1278,40 @@ void MacroAssembler::Ins(Register rt,
}
-void MacroAssembler::Cvt_d_uw(FPURegister fd,
- FPURegister fs,
+void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs,
FPURegister scratch) {
- // Move the data from fs to t8.
- mfc1(t8, fs);
- Cvt_d_uw(fd, t8, scratch);
-}
-
-
-void MacroAssembler::Cvt_d_uw(FPURegister fd,
- Register rs,
- FPURegister scratch) {
- // Convert rs to a FP value in fd (and fd + 1).
- // We do this by converting rs minus the MSB to avoid sign conversion,
- // then adding 2^31 to the result (if needed).
+ // In FP64Mode we do the conversion from long.
+ if (IsFp64Mode()) {
+ mtc1(rs, scratch);
+ Mthc1(zero_reg, scratch);
+ cvt_d_l(fd, scratch);
+ } else {
+ // Convert rs to a FP value in fd.
+ DCHECK(!fd.is(scratch));
+ DCHECK(!rs.is(at));
- DCHECK(!fd.is(scratch));
- DCHECK(!rs.is(t9));
- DCHECK(!rs.is(at));
+ Label msb_clear, conversion_done;
+ // For a value which is < 2^31, regard it as a signed positive word.
+ Branch(&msb_clear, ge, rs, Operand(zero_reg), USE_DELAY_SLOT);
+ mtc1(rs, fd);
- // Save rs's MSB to t9.
- Ext(t9, rs, 31, 1);
- // Remove rs's MSB.
- Ext(at, rs, 0, 31);
- // Move the result to fd.
- mtc1(at, fd);
+ li(at, 0x41F00000); // FP value: 2^32.
- // Convert fd to a real FP value.
- cvt_d_w(fd, fd);
+ // For unsigned inputs >= 2^31, we convert to double as a signed int32,
+ // then add 2^32 to move it back to an unsigned value in the range 2^31..2^32-1.
+ mtc1(zero_reg, scratch);
+ Mthc1(at, scratch);
- Label conversion_done;
+ cvt_d_w(fd, fd);
- // If rs's MSB was 0, it's done.
- // Otherwise we need to add that to the FP register.
- Branch(&conversion_done, eq, t9, Operand(zero_reg));
+ Branch(USE_DELAY_SLOT, &conversion_done);
+ add_d(fd, fd, scratch);
- // Load 2^31 into f20 as its float representation.
- li(at, 0x41E00000);
- mtc1(zero_reg, scratch);
- Mthc1(at, scratch);
- // Add it to fd.
- add_d(fd, fd, scratch);
+ bind(&msb_clear);
+ cvt_d_w(fd, fd);
- bind(&conversion_done);
+ bind(&conversion_done);
+ }
}
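
The rewritten Cvt_d_uw takes the unsigned word in a GPR and branches on the FPU mode: FP64 can widen to a 64-bit integer and convert exactly, while FP32 converts as a signed word and compensates with +2^32 when the most significant bit was set. A plain C++ sketch of that logic (not the V8 code itself):

  #include <cstdint>
  double CvtDUwModel(uint32_t u, bool fp64_mode) {
    if (fp64_mode) {
      return static_cast<double>(static_cast<int64_t>(u));     // exact, via cvt_d_l
    }
    double d = static_cast<double>(static_cast<int32_t>(u));   // negative if MSB set
    if (static_cast<int32_t>(u) < 0) d += 4294967296.0;        // add 2^32 back
    return d;
  }
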
@@ -1438,13 +1439,13 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
if (!IsMipsArchVariant(kMips32r6)) {
if (long_branch) {
Label skip;
- c(UN, D, cmp1, cmp2);
+ c(UN, sizeField, cmp1, cmp2);
bc1f(&skip);
nop();
BranchLong(nan, bd);
bind(&skip);
} else {
- c(UN, D, cmp1, cmp2);
+ c(UN, sizeField, cmp1, cmp2);
bc1t(nan);
if (bd == PROTECT) {
nop();
@@ -1456,13 +1457,13 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
if (long_branch) {
Label skip;
- cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+ cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(&skip, kDoubleCompareReg);
nop();
BranchLong(nan, bd);
bind(&skip);
} else {
- cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+ cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(nan, kDoubleCompareReg);
if (bd == PROTECT) {
nop();
@@ -3270,12 +3271,7 @@ void MacroAssembler::Allocate(int object_size,
return;
}
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!scratch1.is(t9));
- DCHECK(!scratch2.is(t9));
- DCHECK(!result.is(t9));
+ DCHECK(!AreAliased(result, scratch1, scratch2, t9));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
@@ -3291,54 +3287,52 @@ void MacroAssembler::Allocate(int object_size,
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- intptr_t top =
- reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(allocation_limit.address());
+ intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
- // Set up allocation top address and object size registers.
- Register topaddr = scratch1;
- li(topaddr, Operand(allocation_top));
-
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch1;
// This code stores a temporary value in t9.
+ Register alloc_limit = t9;
+ Register result_end = scratch2;
+ li(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into t9.
- lw(result, MemOperand(topaddr));
- lw(t9, MemOperand(topaddr, kPointerSize));
+ // Load allocation top into result and allocation limit into alloc_limit.
+ lw(result, MemOperand(top_address));
+ lw(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. t9 is used
- // immediately below so this use of t9 does not cause difference with
- // respect to register content between debug and release mode.
- lw(t9, MemOperand(topaddr));
- Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
+ // Assert that result actually contains top on entry.
+ lw(alloc_limit, MemOperand(top_address));
+ Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
}
- // Load allocation limit into t9. Result already contains allocation top.
- lw(t9, MemOperand(topaddr, limit - top));
+ // Load allocation limit. Result already contains allocation top.
+ lw(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- And(scratch2, result, Operand(kDoubleAlignmentMask));
+ And(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
- Branch(&aligned, eq, scratch2, Operand(zero_reg));
+ Branch(&aligned, eq, result_end, Operand(zero_reg));
if ((flags & PRETENURE) != 0) {
- Branch(gc_required, Ugreater_equal, result, Operand(t9));
+ Branch(gc_required, Ugreater_equal, result, Operand(alloc_limit));
}
- li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- sw(scratch2, MemOperand(result));
+ li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ sw(result_end, MemOperand(result));
Addu(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
}
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
- Addu(scratch2, result, Operand(object_size));
- Branch(gc_required, Ugreater, scratch2, Operand(t9));
- sw(scratch2, MemOperand(topaddr));
+ Addu(result_end, result, Operand(object_size));
+ Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
+ sw(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -3347,28 +3341,25 @@ void MacroAssembler::Allocate(int object_size,
}
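
Behind the register renaming, the fast path is still a bump of the new-space top pointer, with the allocation limit stored in the word right after the top word (which is why a single base register serves both loads). A minimal C++ model with illustrative names, not the V8 API:

  #include <cstddef>
  char* TryAllocateModel(char** top_address, std::size_t object_size) {
    char* result = top_address[0];             // lw(result, MemOperand(top_address))
    char* limit = top_address[1];              // lw(alloc_limit, ..., kPointerSize)
    char* result_end = result + object_size;
    if (result_end > limit) return nullptr;    // Branch(gc_required, Ugreater, ...)
    top_address[0] = result_end;               // sw(result_end, MemOperand(top_address))
    return result;                             // TAG_OBJECT then sets the heap tag bit
  }
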
-void MacroAssembler::Allocate(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ Label* gc_required, AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
li(result, 0x7091);
- li(scratch1, 0x7191);
- li(scratch2, 0x7291);
+ li(scratch, 0x7191);
+ li(result_end, 0x7291);
}
jmp(gc_required);
return;
}
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!object_size.is(t9));
- DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
+ // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+ // is not specified. Other registers must not overlap.
+ DCHECK(!AreAliased(object_size, result, scratch, t9));
+ DCHECK(!AreAliased(result_end, result, scratch, t9));
+ DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
// Check relative positions of allocation top and limit addresses.
// ARM adds additional checks to make sure the ldm instruction can be
@@ -3377,45 +3368,42 @@ void MacroAssembler::Allocate(Register object_size,
AllocationUtils::GetAllocationTopReference(isolate(), flags);
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- intptr_t top =
- reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(allocation_limit.address());
+ intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
- // Set up allocation top address and object size registers.
- Register topaddr = scratch1;
- li(topaddr, Operand(allocation_top));
-
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch;
// This code stores a temporary value in t9.
+ Register alloc_limit = t9;
+ li(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into t9.
- lw(result, MemOperand(topaddr));
- lw(t9, MemOperand(topaddr, kPointerSize));
+ // Load allocation top into result and allocation limit into alloc_limit.
+ lw(result, MemOperand(top_address));
+ lw(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. t9 is used
- // immediately below so this use of t9 does not cause difference with
- // respect to register content between debug and release mode.
- lw(t9, MemOperand(topaddr));
- Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
+ // Assert that result actually contains top on entry.
+ lw(alloc_limit, MemOperand(top_address));
+ Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
}
- // Load allocation limit into t9. Result already contains allocation top.
- lw(t9, MemOperand(topaddr, limit - top));
+ // Load allocation limit. Result already contains allocation top.
+ lw(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- And(scratch2, result, Operand(kDoubleAlignmentMask));
+ And(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
- Branch(&aligned, eq, scratch2, Operand(zero_reg));
+ Branch(&aligned, eq, result_end, Operand(zero_reg));
if ((flags & PRETENURE) != 0) {
- Branch(gc_required, Ugreater_equal, result, Operand(t9));
+ Branch(gc_required, Ugreater_equal, result, Operand(alloc_limit));
}
- li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- sw(scratch2, MemOperand(result));
+ li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ sw(result_end, MemOperand(result));
Addu(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
}
@@ -3424,19 +3412,19 @@ void MacroAssembler::Allocate(Register object_size,
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
- sll(scratch2, object_size, kPointerSizeLog2);
- Addu(scratch2, result, scratch2);
+ sll(result_end, object_size, kPointerSizeLog2);
+ Addu(result_end, result, result_end);
} else {
- Addu(scratch2, result, Operand(object_size));
+ Addu(result_end, result, Operand(object_size));
}
- Branch(gc_required, Ugreater, scratch2, Operand(t9));
+ Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
- And(t9, scratch2, Operand(kObjectAlignmentMask));
- Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
+ And(alloc_limit, result_end, Operand(kObjectAlignmentMask));
+ Check(eq, kUnalignedAllocationInNewSpace, alloc_limit, Operand(zero_reg));
}
- sw(scratch2, MemOperand(topaddr));
+ sw(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -3612,29 +3600,25 @@ void MacroAssembler::AllocateHeapNumberWithValue(Register result,
}
-// Copies a fixed number of fields of heap objects from src to dst.
-void MacroAssembler::CopyFields(Register dst,
- Register src,
- RegList temps,
- int field_count) {
- DCHECK((temps & dst.bit()) == 0);
- DCHECK((temps & src.bit()) == 0);
- // Primitive implementation using only one temporary register.
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch1,
+ Register scratch2, Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch1));
+ DCHECK(!result.is(scratch2));
+ DCHECK(!result.is(value));
- Register tmp = no_reg;
- // Find a temp register in temps list.
- for (int i = 0; i < kNumRegisters; i++) {
- if ((temps & (1 << i)) != 0) {
- tmp.reg_code = i;
- break;
- }
- }
- DCHECK(!tmp.is(no_reg));
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
- for (int i = 0; i < field_count; i++) {
- lw(tmp, FieldMemOperand(src, i * kPointerSize));
- sw(tmp, FieldMemOperand(dst, i * kPointerSize));
- }
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
+ sw(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ sw(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
+ sw(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
+ sw(value, FieldMemOperand(result, JSValue::kValueOffset));
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
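
The stores above lay out a standard JSObject header followed by the boxed primitive; an illustrative struct (not V8's real declaration) of the four words the STATIC_ASSERT pins down:

  struct JSValueLayoutSketch {
    void* map;         // initial map taken from |constructor|
    void* properties;  // empty_fixed_array
    void* elements;    // empty_fixed_array
    void* value;       // the wrapped primitive
  };
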
@@ -3706,16 +3690,16 @@ void MacroAssembler::CopyBytes(Register src,
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
Register filler) {
Label loop, entry;
Branch(&entry);
bind(&loop);
- sw(filler, MemOperand(start_offset));
- Addu(start_offset, start_offset, kPointerSize);
+ sw(filler, MemOperand(current_address));
+ Addu(current_address, current_address, kPointerSize);
bind(&entry);
- Branch(&loop, ult, start_offset, Operand(end_offset));
+ Branch(&loop, ult, current_address, Operand(end_address));
}
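
Only the parameter names change; the emitted loop still stores |filler| into every word from the current address up to, but not including, the end address, and the branch-first shape handles an empty range. C++ equivalent (sketch):

  void InitializeFieldsWithFillerModel(unsigned* current_address,
                                       unsigned* end_address, unsigned filler) {
    while (current_address < end_address) *current_address++ = filler;
  }
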
@@ -3766,6 +3750,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register scratch3,
Label* fail,
int elements_offset) {
+ DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2,
+ scratch3));
Label smi_value, maybe_nan, have_double_value, is_nan, done;
Register mantissa_reg = scratch2;
Register exponent_reg = scratch3;
@@ -3820,7 +3806,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Addu(scratch1, scratch1, scratch2);
// scratch1 is now effective address of the double element
- Register untagged_value = elements_reg;
+ Register untagged_value = scratch2;
SmiUntag(untagged_value, value_reg);
mtc1(untagged_value, f2);
cvt_d_w(f0, f2);
@@ -3985,8 +3971,6 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -4006,7 +3990,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// passed in registers.
DCHECK(actual.is_immediate() || actual.reg().is(a0));
DCHECK(expected.is_immediate() || expected.reg().is(a2));
- DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@@ -4034,11 +4017,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
if (!definitely_matches) {
- if (!code_constant.is_null()) {
- li(a3, Operand(code_constant));
- addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
- }
-
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
@@ -4056,21 +4034,78 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ li(t0, Operand(step_in_enabled));
+ lb(t0, MemOperand(t0));
+ Branch(&skip_flooding, eq, t0, Operand(zero_reg));
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
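
The expected and actual counts are Smi-tagged before being pushed because the runtime call can trigger a GC that walks the stack, and a raw integer must not be mistaken for a heap pointer there. A sketch of the 32-bit tagging this relies on (kSmiTag == 0, kSmiTagSize == 1):

  int SmiTagModel(int n) { return n << 1; }    // value in the upper 31 bits, tag bit 0
  int SmiUntagModel(int s) { return s >> 1; }  // arithmetic shift restores the value
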
+
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(function.is(a1));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
- Label done;
+ if (call_wrapper.NeedsDebugStepCheck()) {
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+ }
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ }
+ Label done;
bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, &definitely_mismatches, flag,
+ InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
call_wrapper);
if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Register code = t0;
+ lw(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
Call(code);
@@ -4087,6 +4122,7 @@ void MacroAssembler::InvokeCode(Register code,
void MacroAssembler::InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -4096,18 +4132,18 @@ void MacroAssembler::InvokeFunction(Register function,
// Contract with called JS functions requires that function is passed in a1.
DCHECK(function.is(a1));
Register expected_reg = a2;
- Register code_reg = a3;
+ Register temp_reg = t0;
- lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ lw(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
lw(expected_reg,
- FieldMemOperand(code_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
+ FieldMemOperand(temp_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
sra(expected_reg, expected_reg, kSmiTagSize);
- lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(function, new_target, expected, actual, flag,
+ call_wrapper);
}
@@ -4125,11 +4161,7 @@ void MacroAssembler::InvokeFunction(Register function,
// Get the function and setup the context.
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- InvokeCode(a3, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper);
}
@@ -4301,108 +4333,161 @@ void MacroAssembler::SmiToDoubleFPURegister(Register smi,
}
-void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
- const Operand& right,
- Register overflow_dst,
- Register scratch) {
+static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
+ Label* overflow_label,
+ Label* no_overflow_label) {
+ DCHECK(overflow_label || no_overflow_label);
+ if (!overflow_label) {
+ DCHECK(no_overflow_label);
+ masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
+ } else {
+ masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
+ if (no_overflow_label) masm->Branch(no_overflow_label);
+ }
+}
+
+
+void MacroAssembler::AddBranchOvf(Register dst, Register left,
+ const Operand& right, Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
if (right.is_reg()) {
- AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+ AddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
+ scratch);
} else {
- if (dst.is(left)) {
- mov(scratch, left); // Preserve left.
- Addu(dst, left, right.immediate()); // Left is overwritten.
- xor_(scratch, dst, scratch); // Original left.
- // Load right since xori takes uint16 as immediate.
- Addu(t9, zero_reg, right);
- xor_(overflow_dst, dst, t9);
- and_(overflow_dst, overflow_dst, scratch);
+ if (IsMipsArchVariant(kMips32r6)) {
+ Register right_reg = t9;
+ DCHECK(!left.is(right_reg));
+ li(right_reg, Operand(right));
+ AddBranchOvf(dst, left, right_reg, overflow_label, no_overflow_label);
} else {
- Addu(dst, left, right.immediate());
- xor_(overflow_dst, dst, left);
- // Load right since xori takes uint16 as immediate.
- Addu(t9, zero_reg, right);
- xor_(scratch, dst, t9);
- and_(overflow_dst, scratch, overflow_dst);
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ Addu(dst, left, right.immediate()); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ // Load right since xori takes uint16 as immediate.
+ Addu(overflow_dst, zero_reg, right);
+ xor_(overflow_dst, dst, overflow_dst);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ Addu(dst, left, right.immediate());
+ xor_(overflow_dst, dst, left);
+ // Load right since xori takes uint16 as immediate.
+ Addu(scratch, zero_reg, right);
+ xor_(scratch, dst, scratch);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
}
}
-void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
- Register right,
- Register overflow_dst,
- Register scratch) {
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!dst.is(scratch));
- DCHECK(!overflow_dst.is(scratch));
- DCHECK(!overflow_dst.is(left));
- DCHECK(!overflow_dst.is(right));
-
- if (left.is(right) && dst.is(left)) {
- DCHECK(!dst.is(t9));
- DCHECK(!scratch.is(t9));
- DCHECK(!left.is(t9));
- DCHECK(!right.is(t9));
- DCHECK(!overflow_dst.is(t9));
- mov(t9, right);
- right = t9;
- }
-
- if (dst.is(left)) {
- mov(scratch, left); // Preserve left.
- addu(dst, left, right); // Left is overwritten.
- xor_(scratch, dst, scratch); // Original left.
- xor_(overflow_dst, dst, right);
- and_(overflow_dst, overflow_dst, scratch);
- } else if (dst.is(right)) {
- mov(scratch, right); // Preserve right.
- addu(dst, left, right); // Right is overwritten.
- xor_(scratch, dst, scratch); // Original right.
- xor_(overflow_dst, dst, left);
- and_(overflow_dst, overflow_dst, scratch);
+void MacroAssembler::AddBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ if (!overflow_label) {
+ DCHECK(no_overflow_label);
+ DCHECK(!dst.is(scratch));
+ Register left_reg = left.is(dst) ? scratch : left;
+ Register right_reg = right.is(dst) ? t9 : right;
+ DCHECK(!dst.is(left_reg));
+ DCHECK(!dst.is(right_reg));
+ Move(left_reg, left);
+ Move(right_reg, right);
+ addu(dst, left, right);
+ bnvc(left_reg, right_reg, no_overflow_label);
+ } else {
+ bovc(left, right, overflow_label);
+ addu(dst, left, right);
+ if (no_overflow_label) bc(no_overflow_label);
+ }
} else {
- addu(dst, left, right);
- xor_(overflow_dst, dst, left);
- xor_(scratch, dst, right);
- and_(overflow_dst, scratch, overflow_dst);
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ DCHECK(!right.is(overflow_dst));
+ DCHECK(!left.is(scratch));
+ DCHECK(!right.is(scratch));
+
+ if (left.is(right) && dst.is(left)) {
+ mov(overflow_dst, right);
+ right = overflow_dst;
+ }
+
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ addu(dst, left, right); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ xor_(overflow_dst, dst, right);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else if (dst.is(right)) {
+ mov(scratch, right); // Preserve right.
+ addu(dst, left, right); // Right is overwritten.
+ xor_(scratch, dst, scratch); // Original right.
+ xor_(overflow_dst, dst, left);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ addu(dst, left, right);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, dst, right);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
}
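
On r6 the overflow branch maps directly onto bovc/bnvc; the pre-r6 path uses the classic sign test, computed into t9: a signed add overflows exactly when both operands have the same sign and the sum's sign differs, so ((sum ^ a) & (sum ^ b)) has its sign bit set. A C++ sketch of that predicate (not V8 code):

  #include <cstdint>
  bool AddOverflowsModel(int32_t a, int32_t b) {
    uint32_t usum = static_cast<uint32_t>(a) + static_cast<uint32_t>(b);
    int32_t sum = static_cast<int32_t>(usum);  // wraparound result, as addu leaves it
    return ((sum ^ a) & (sum ^ b)) < 0;        // sign bit set => overflow
  }
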
-void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
- const Operand& right,
- Register overflow_dst,
- Register scratch) {
+void MacroAssembler::SubBranchOvf(Register dst, Register left,
+ const Operand& right, Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ DCHECK(overflow_label || no_overflow_label);
if (right.is_reg()) {
- SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+ SubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
+ scratch);
} else {
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ DCHECK(!left.is(scratch));
if (dst.is(left)) {
mov(scratch, left); // Preserve left.
- Subu(dst, left, right); // Left is overwritten.
- xor_(overflow_dst, dst, scratch); // scratch is original left.
+ Subu(dst, left, right.immediate()); // Left is overwritten.
// Load right since xori takes uint16 as immediate.
- Addu(t9, zero_reg, right);
- xor_(scratch, scratch, t9); // scratch is original left.
+ Addu(overflow_dst, zero_reg, right);
+ xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
+ xor_(scratch, dst, scratch); // scratch is original left.
and_(overflow_dst, scratch, overflow_dst);
} else {
Subu(dst, left, right);
xor_(overflow_dst, dst, left);
// Load right since xori takes uint16 as immediate.
- Addu(t9, zero_reg, right);
- xor_(scratch, left, t9);
+ Addu(scratch, zero_reg, right);
+ xor_(scratch, left, scratch);
and_(overflow_dst, scratch, overflow_dst);
}
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
}
-void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
- Register right,
- Register overflow_dst,
- Register scratch) {
- DCHECK(!dst.is(overflow_dst));
+void MacroAssembler::SubBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ DCHECK(overflow_label || no_overflow_label);
+ Register overflow_dst = t9;
DCHECK(!dst.is(scratch));
- DCHECK(!overflow_dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
DCHECK(!overflow_dst.is(left));
DCHECK(!overflow_dst.is(right));
DCHECK(!scratch.is(left));
@@ -4412,8 +4497,9 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
// left == right, let's not make that restriction here.
if (left.is(right)) {
mov(dst, zero_reg);
- mov(overflow_dst, zero_reg);
- return;
+ if (no_overflow_label) {
+ Branch(no_overflow_label);
+ }
}
if (dst.is(left)) {
@@ -4434,6 +4520,7 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
xor_(scratch, left, right);
and_(overflow_dst, scratch, overflow_dst);
}
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
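
The subtraction variant feeds a different XOR pair into the same helper: overflow happens when the operands have different signs and the difference's sign differs from the minuend's. Sketch:

  #include <cstdint>
  bool SubOverflowsModel(int32_t left, int32_t right) {
    uint32_t udiff = static_cast<uint32_t>(left) - static_cast<uint32_t>(right);
    int32_t diff = static_cast<int32_t>(udiff);
    return ((diff ^ left) & (left ^ right)) < 0;  // sign bit set => overflow
  }
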
@@ -4469,24 +4556,13 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
}
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- PrepareCEntryArgs(num_arguments);
- JumpToExternalReference(ext);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ PrepareCEntryArgs(function->nargs);
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
}
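
For call sites the explicit argument and result counts disappear, since both now come from the runtime function table; illustratively (Runtime::kSomeId stands in for any runtime id with a fixed nargs):

  __ TailCallRuntime(Runtime::kSomeId, 2, 1);  // old signature
  __ TailCallRuntime(Runtime::kSomeId);        // new signature
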
@@ -4508,34 +4584,10 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- GetBuiltinEntry(t9, native_context_index);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(t9));
- Call(t9);
- call_wrapper.AfterCall();
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- Jump(t9);
- }
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- int native_context_index) {
- // Load the builtins object into target register.
- lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- lw(target, FieldMemOperand(target, JSGlobalObject::kNativeContextOffset));
- // Load the JavaScript builtin function from the builtins object.
- lw(target, ContextOperand(target, native_context_index));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target,
- int native_context_index) {
- DCHECK(!target.is(a1));
- GetBuiltinFunction(a1, native_context_index);
- // Load the code entry point from the builtins object.
- lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ // Fake a parameter count to avoid emitting code to do the check.
+ ParameterCount expected(0);
+ LoadNativeContextSlot(native_context_index, a1);
+ InvokeFunctionCode(a1, no_reg, expected, expected, flag, call_wrapper);
}
@@ -4672,47 +4724,29 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
-void MacroAssembler::LoadGlobalProxy(Register dst) {
- lw(dst, GlobalObjectOperand());
- lw(dst, FieldMemOperand(dst, JSGlobalObject::kGlobalProxyOffset));
-}
-
-
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match) {
- // Load the global or builtins object from the current context.
- lw(scratch,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- lw(scratch, FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
- lw(scratch,
- MemOperand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
- size_t offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- lw(at, FieldMemOperand(scratch, offset));
+ lw(scratch, NativeContextMemOperand());
+ lw(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
Branch(no_map_match, ne, map_in_out, Operand(at));
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- lw(map_in_out, FieldMemOperand(scratch, offset));
+ lw(map_in_out,
+ ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- lw(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- lw(function, FieldMemOperand(function, JSGlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- lw(function, MemOperand(function, Context::SlotOffset(index)));
+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+ lw(dst, NativeContextMemOperand());
+ lw(dst, ContextMemOperand(dst, index));
}
@@ -5115,6 +5149,17 @@ void MacroAssembler::AssertFunction(Register object) {
}
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -5385,8 +5430,8 @@ void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}
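
JumpIfBlack now asks HasColor for both mark bits set because the black pattern flips from "10" to "11" while grey becomes "10" (white stays "00"). Packing the first mark bit, the one selected by the mask, into bit 0 and the second into bit 1, the predicates the DCHECKs describe look like this (a sketch, not V8 code):

  bool IsWhiteModel(unsigned bits) { return (bits & 1) == 0; }  // first bit clear
  bool IsBlackModel(unsigned bits) { return (bits & 3) == 3; }  // both bits set
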
@@ -5420,28 +5465,6 @@ void MacroAssembler::HasColor(Register object,
}
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object) {
- DCHECK(!AreAliased(value, scratch, t8, no_reg));
- Label is_data_object;
- lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
- Branch(&is_data_object, eq, t8, Operand(scratch));
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
- Branch(not_data_object, ne, t8, Operand(zero_reg));
- bind(&is_data_object);
-}
-
-
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
@@ -5457,112 +5480,23 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
}
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Register load_scratch,
- Label* value_is_white_and_not_data) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register mask_scratch, Register load_scratch,
+ Label* value_is_white) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- Label done;
-
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
And(t8, mask_scratch, load_scratch);
- Branch(&done, ne, t8, Operand(zero_reg));
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- // sll may overflow, making the check conservative.
- sll(t8, mask_scratch, 1);
- And(t8, load_scratch, t8);
- Branch(&ok, eq, t8, Operand(zero_reg));
- stop("Impossible marking bit pattern");
- bind(&ok);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = load_scratch; // Holds map while checking type.
- Register length = load_scratch; // Holds length of object after testing type.
- Label is_data_object;
-
- // Check for heap-number
- lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
- LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
- {
- Label skip;
- Branch(&skip, ne, t8, Operand(map));
- li(length, HeapNumber::kSize);
- Branch(&is_data_object);
- bind(&skip);
- }
-
- // Check for strings.
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = load_scratch;
- lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
- Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
- DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
- And(t8, instance_type, Operand(kExternalStringTag));
- {
- Label skip;
- Branch(&skip, eq, t8, Operand(zero_reg));
- li(length, ExternalString::kSize);
- Branch(&is_data_object);
- bind(&skip);
- }
-
- // Sequential string, either Latin1 or UC16.
- // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
- // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
- // getting the length multiplied by 2.
- DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
- DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
- lw(t9, FieldMemOperand(value, String::kLengthOffset));
- And(t8, instance_type, Operand(kStringEncodingMask));
- {
- Label skip;
- Branch(&skip, eq, t8, Operand(zero_reg));
- srl(t9, t9, 1);
- bind(&skip);
- }
- Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
- And(length, length, Operand(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- Or(t8, t8, Operand(mask_scratch));
- sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
- And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
- lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- Addu(t8, t8, Operand(length));
- sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
- bind(&done);
+ Branch(value_is_white, eq, t8, Operand(zero_reg));
}
@@ -5767,17 +5701,13 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
}
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3,
- Register reg4,
- Register reg5,
- Register reg6,
- Register reg7,
- Register reg8) {
- int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
- reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid();
+bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
+ Register reg5, Register reg6, Register reg7, Register reg8,
+ Register reg9, Register reg10) {
+ int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
+ reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+ reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
+ reg10.is_valid();
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();
@@ -5788,18 +5718,19 @@ bool AreAliased(Register reg1,
if (reg6.is_valid()) regs |= reg6.bit();
if (reg7.is_valid()) regs |= reg7.bit();
if (reg8.is_valid()) regs |= reg8.bit();
+ if (reg9.is_valid()) regs |= reg9.bit();
+ if (reg10.is_valid()) regs |= reg10.bit();
int n_of_non_aliasing_regs = NumRegs(regs);
return n_of_valid_regs != n_of_non_aliasing_regs;
}
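
AreAliased keeps its counting trick while growing to ten registers: each valid register contributes one bit to a mask, and aliasing shows up as the popcount of the mask falling short of the number of valid registers. A stand-alone C++ sketch of the same idea (not the V8 implementation):

  #include <bitset>
  #include <initializer_list>
  bool AnyAliasedModel(std::initializer_list<int> reg_codes) {  // -1 plays the role of no_reg
    unsigned mask = 0;
    int valid = 0;
    for (int code : reg_codes) {
      if (code < 0) continue;
      ++valid;
      mask |= 1u << code;
    }
    return valid != static_cast<int>(std::bitset<32>(mask).count());
  }
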
-CodePatcher::CodePatcher(byte* address,
- int instructions,
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache)
: address_(address),
size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap),
+ masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
@@ -5811,7 +5742,7 @@ CodePatcher::CodePatcher(byte* address,
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
if (flush_cache_ == FLUSH) {
- CpuFeatures::FlushICache(address_, size_);
+ Assembler::FlushICache(masm_.isolate(), address_, size_);
}
// Check that the code was patched as expected.