Diffstat (limited to 'deps/v8/src/x64/macro-assembler-x64.cc')
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 452
1 file changed, 227 insertions(+), 225 deletions(-)
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 3c8cab2d83..9952eb3b65 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -17,12 +17,13 @@
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
has_frame_(false),
root_array_available_(true) {
- if (isolate() != NULL) {
+ if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
@@ -661,39 +662,30 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
}
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
// -- rsp[8] : argument num_arguments - 1
// ...
// -- rsp[8 * num_arguments] : argument 0 (receiver)
+ //
+ // For runtime functions with variable arguments:
+ // -- rax : number of arguments
// -----------------------------------
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(rax, num_arguments);
- JumpToExternalReference(ext, result_size);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ Set(rax, function->nargs);
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
}
-void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
- int result_size) {
+void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
// Set the entry point and jump to the C entry runtime stub.
LoadAddress(rbx, ext);
- CEntryStub ces(isolate(), result_size);
+ CEntryStub ces(isolate(), 1);
jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
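
A sketch of the call-site effect of the new TailCallRuntime signature (the runtime id and counts below are illustrative, not taken from this diff):
// Before: callers spelled out the argument count and result size, e.g.
//   __ TailCallRuntime(Runtime::kToNumber, 1, 1);
// After: both are read from the Runtime::Function descriptor, so the call
// site shrinks to
//   __ TailCallRuntime(Runtime::kToNumber);
// Runtime functions with a variable argument count (nargs < 0) still expect
// the caller to have placed the count in rax before the tail call.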
@@ -703,30 +695,10 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- // Rely on the assertion to check that the number of provided
- // arguments match the expected number of arguments. Fake a
- // parameter count to avoid emitting code to do the check.
+ // Fake a parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
- GetBuiltinEntry(rdx, native_context_index);
- InvokeCode(rdx, expected, expected, flag, call_wrapper);
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- int native_context_index) {
- // Load the builtins object into target register.
- movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movp(target, FieldOperand(target, JSGlobalObject::kNativeContextOffset));
- movp(target, ContextOperand(target, native_context_index));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target,
- int native_context_index) {
- DCHECK(!target.is(rdi));
- // Load the JavaScript builtin function from the builtins object.
- GetBuiltinFunction(rdi, native_context_index);
- movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ LoadNativeContextSlot(native_context_index, rdi);
+ InvokeFunctionCode(rdi, no_reg, expected, expected, flag, call_wrapper);
}
@@ -899,6 +871,43 @@ void MacroAssembler::Cvtqsi2sd(XMMRegister dst, const Operand& src) {
}
+void MacroAssembler::Cvtqui2ss(XMMRegister dst, Register src, Register tmp) {
+ Label msb_set_src;
+ Label jmp_return;
+ testq(src, src);
+ j(sign, &msb_set_src, Label::kNear);
+ Cvtqsi2ss(dst, src);
+ jmp(&jmp_return, Label::kNear);
+ bind(&msb_set_src);
+ movq(tmp, src);
+ shrq(src, Immediate(1));
+ // Recover the least significant bit to avoid rounding errors.
+ andq(tmp, Immediate(1));
+ orq(src, tmp);
+ Cvtqsi2ss(dst, src);
+ addss(dst, dst);
+ bind(&jmp_return);
+}
+
+
+void MacroAssembler::Cvtqui2sd(XMMRegister dst, Register src, Register tmp) {
+ Label msb_set_src;
+ Label jmp_return;
+ testq(src, src);
+ j(sign, &msb_set_src, Label::kNear);
+ Cvtqsi2sd(dst, src);
+ jmp(&jmp_return, Label::kNear);
+ bind(&msb_set_src);
+ movq(tmp, src);
+ shrq(src, Immediate(1));
+ andq(tmp, Immediate(1));
+ orq(src, tmp);
+ Cvtqsi2sd(dst, src);
+ addsd(dst, dst);
+ bind(&jmp_return);
+}
+
+
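
For reference, the unsigned-to-float helpers above implement the following logic, shown here as a plain C++ sketch (illustrative, not code from this commit):
// If the MSB is clear the value fits a signed conversion; otherwise halve it,
// fold the dropped low bit back in so rounding still sees it, convert as
// signed, and double the result (the addss/addsd above).
static double UnsignedToDouble(uint64_t src) {
  if (static_cast<int64_t>(src) >= 0) {
    return static_cast<double>(static_cast<int64_t>(src));
  }
  uint64_t halved = (src >> 1) | (src & 1);
  return static_cast<double>(static_cast<int64_t>(halved)) * 2.0;
}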
void MacroAssembler::Cvtsd2si(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -929,6 +938,26 @@ void MacroAssembler::Cvttsd2si(Register dst, const Operand& src) {
}
+void MacroAssembler::Cvttss2siq(Register dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvttss2siq(dst, src);
+ } else {
+ cvttss2siq(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvttss2siq(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvttss2siq(dst, src);
+ } else {
+ cvttss2siq(dst, src);
+ }
+}
+
+
void MacroAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -1062,6 +1091,7 @@ void MacroAssembler::SafePush(Smi* src) {
Register MacroAssembler::GetSmiConstant(Smi* source) {
+ STATIC_ASSERT(kSmiTag == 0);
int value = source->value();
if (value == 0) {
xorl(kScratchRegister, kScratchRegister);
@@ -1073,9 +1103,13 @@ Register MacroAssembler::GetSmiConstant(Smi* source) {
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
- // Special-casing 0 here to use xorl seems to make things slower, so we don't
- // do it.
- Move(dst, source, Assembler::RelocInfoNone());
+ STATIC_ASSERT(kSmiTag == 0);
+ int value = source->value();
+ if (value == 0) {
+ xorl(dst, dst);
+ } else {
+ Move(dst, source, Assembler::RelocInfoNone());
+ }
}
@@ -2727,6 +2761,17 @@ void MacroAssembler::Movmskpd(Register dst, XMMRegister src) {
}
+void MacroAssembler::Roundss(XMMRegister dst, XMMRegister src,
+ RoundingMode mode) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vroundss(dst, dst, src, mode);
+ } else {
+ roundss(dst, src, mode);
+ }
+}
+
+
void MacroAssembler::Roundsd(XMMRegister dst, XMMRegister src,
RoundingMode mode) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -3808,6 +3853,18 @@ void MacroAssembler::AssertFunction(Register object) {
}
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ testb(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotABoundFunction);
+ Push(object);
+ CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
+ Pop(object);
+ Check(equal, kOperandIsNotABoundFunction);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
if (emit_debug_code()) {
Label done_checking;
@@ -3939,15 +3996,16 @@ void MacroAssembler::DebugBreak() {
void MacroAssembler::InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
- movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ movp(rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
LoadSharedFunctionInfoSpecialField(
- rbx, rdx, SharedFunctionInfo::kFormalParameterCountOffset);
+ rbx, rbx, SharedFunctionInfo::kFormalParameterCountOffset);
ParameterCount expected(rbx);
- InvokeFunction(function, expected, actual, flag, call_wrapper);
+ InvokeFunction(function, new_target, expected, actual, flag, call_wrapper);
}
@@ -3957,44 +4015,55 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
Move(rdi, function);
- InvokeFunction(rdi, expected, actual, flag, call_wrapper);
+ InvokeFunction(rdi, no_reg, expected, actual, flag, call_wrapper);
}
void MacroAssembler::InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
DCHECK(function.is(rdi));
movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
- // Advances rdx to the end of the Code object header, to the start of
- // the executable code.
- movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- InvokeCode(rdx, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(rdi, new_target, expected, actual, flag, call_wrapper);
}
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(function.is(rdi));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(rdx));
+
+ if (call_wrapper.NeedsDebugStepCheck()) {
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+ }
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ }
Label done;
bool definitely_mismatches = false;
InvokePrologue(expected,
actual,
- Handle<Code>::null(),
- code,
&done,
&definitely_mismatches,
flag,
Label::kNear,
call_wrapper);
if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Operand code = FieldOperand(function, JSFunction::kCodeEntryOffset);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
call(code);
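
For orientation, the register conventions InvokeFunctionCode relies on, as inferred from this file (a summary, not part of the commit):
// rdi : the JSFunction being invoked (asserted above)
// rdx : the new.target, reset to undefined when no register is supplied
// rbx : the expected argument count, loaded from the SharedFunctionInfo
// rsi : the callee context, loaded from the function before invoking
// rax : conventionally the actual argument count (not shown in this hunk)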
@@ -4010,8 +4079,6 @@ void MacroAssembler::InvokeCode(Register code,
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_register,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -4061,13 +4128,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (!definitely_matches) {
Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
- if (!code_constant.is_null()) {
- Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
- addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- } else if (!code_register.is(rdx)) {
- movp(rdx, code_register);
- }
-
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor));
Call(adaptor, RelocInfo::CODE_TARGET);
@@ -4083,6 +4143,49 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ Operand step_in_enabled_operand = ExternalOperand(step_in_enabled);
+ cmpb(step_in_enabled_operand, Immediate(0));
+ j(equal, &skip_flooding);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ Integer32ToSmi(expected.reg(), expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ Integer32ToSmi(actual.reg(), actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiToInteger64(actual.reg(), actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiToInteger64(expected.reg(), expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
+
+
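One observation on the push/pop pattern above (a note, not a change in the diff): register-held counts are smi-tagged before being pushed so that every slot on the stack is a valid tagged value while the runtime call runs, then untagged on the way out.
// For a register-held count n the round trip is:
//   Integer32ToSmi(n, n); Push(n);    // tag, then save
//   ...
//   Pop(n); SmiToInteger64(n, n);     // restore, then untag
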
void MacroAssembler::StubPrologue() {
pushq(rbp); // Caller's frame pointer.
movp(rbp, rsp);
@@ -4307,10 +4410,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
}
// Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- movp(scratch, FieldOperand(scratch, offset));
- movp(scratch, FieldOperand(scratch, JSGlobalObject::kNativeContextOffset));
+ movp(scratch, ContextOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -4840,6 +4940,27 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result,
}
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch,
+ Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch));
+ DCHECK(!result.is(value));
+
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
+
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch);
+ movp(FieldOperand(result, HeapObject::kMapOffset), scratch);
+ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
+ movp(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
+ movp(FieldOperand(result, JSObject::kElementsOffset), scratch);
+ movp(FieldOperand(result, JSValue::kValueOffset), value);
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+}
+
+
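The object laid out by AllocateJSValue, summarized field by field from the stores above:
// result->map        <- initial map of 'constructor'
// result->properties <- empty_fixed_array
// result->elements   <- empty_fixed_array
// result->value      <- the wrapped primitive 'value'
// Four pointer-sized fields in total, matching the STATIC_ASSERT that
// JSValue::kSize == 4 * kPointerSize.
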
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies. The contents of scratch and length are destroyed.
// Destination is incremented by length, source, length and scratch are
@@ -4926,16 +5047,16 @@ void MacroAssembler::CopyBytes(Register destination,
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
Register filler) {
Label loop, entry;
jmp(&entry);
bind(&loop);
- movp(Operand(start_offset, 0), filler);
- addp(start_offset, Immediate(kPointerSize));
+ movp(Operand(current_address, 0), filler);
+ addp(current_address, Immediate(kPointerSize));
bind(&entry);
- cmpp(start_offset, end_offset);
+ cmpp(current_address, end_address);
j(below, &loop);
}
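
The fill loop above is equivalent to this C-style sketch (illustrative; the names stand for the values held in the three registers):
// Fill [current_address, end_address) one pointer at a time with 'filler'.
for (Address addr = current_address; addr < end_address; addr += kPointerSize) {
  *reinterpret_cast<Object**>(addr) = filler;
}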
@@ -4966,36 +5087,24 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
-void MacroAssembler::LoadGlobalProxy(Register dst) {
- movp(dst, GlobalObjectOperand());
- movp(dst, FieldOperand(dst, JSGlobalObject::kGlobalProxyOffset));
-}
-
-
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match) {
- // Load the global or builtins object from the current context.
- movp(scratch,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movp(scratch, FieldOperand(scratch, JSGlobalObject::kNativeContextOffset));
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
- movp(scratch, Operand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
-
- int offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- cmpp(map_in_out, FieldOperand(scratch, offset));
+ movp(scratch, NativeContextOperand());
+ cmpp(map_in_out,
+ ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
j(not_equal, no_map_match);
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- movp(map_in_out, FieldOperand(scratch, offset));
+ movp(map_in_out,
+ ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
@@ -5005,14 +5114,10 @@ static const int kRegisterPassedArguments = 4;
static const int kRegisterPassedArguments = 6;
#endif
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- movp(function,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- movp(function, FieldOperand(function, JSGlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- movp(function, Operand(function, Context::SlotOffset(index)));
+
+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+ movp(dst, NativeContextOperand());
+ movp(dst, ContextOperand(dst, index));
}
@@ -5151,10 +5256,10 @@ bool AreAliased(Register reg1,
#endif
-CodePatcher::CodePatcher(byte* address, int size)
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size)
: address_(address),
size_(size),
- masm_(NULL, address, size + Assembler::kGap) {
+ masm_(isolate, address, size + Assembler::kGap, CodeObjectRequired::kNo) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap on order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
@@ -5164,7 +5269,7 @@ CodePatcher::CodePatcher(byte* address, int size)
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- Assembler::FlushICacheWithoutIsolate(address_, size_);
+ Assembler::FlushICache(masm_.isolate(), address_, size_);
// Check that the code was patched as expected.
DCHECK(masm_.pc_ == address_ + size_);
@@ -5202,45 +5307,19 @@ void MacroAssembler::JumpIfBlack(Register object,
Label* on_black,
Label::Distance on_black_distance) {
DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
+
GetMarkBits(object, bitmap_scratch, mask_scratch);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
// The mask_scratch register contains a 1 at the position of the first bit
- // and a 0 at all other positions, including the position of the second bit.
+ // and a 1 at a position of the second bit. All other positions are zero.
movp(rcx, mask_scratch);
- // Make rcx into a mask that covers both marking bits using the operation
- // rcx = mask | (mask << 1).
- leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
- // Note that we are using a 4-byte aligned 8-byte load.
andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
cmpp(mask_scratch, rcx);
j(equal, on_black, on_black_distance);
}
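
A scalar sketch of the black test as it reads after this change, where an object now owns two adjacent mark bits and black is the pattern 11 (illustrative only):
static bool IsBlack(uintptr_t bitmap_cell, int bit_index) {
  uintptr_t mask = uintptr_t{3} << bit_index;  // both mark bits of the object
  return (bitmap_cell & mask) == mask;         // black iff both bits are set
}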
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(
- Register value,
- Register scratch,
- Label* not_data_object,
- Label::Distance not_data_object_distance) {
- Label is_data_object;
- movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
- CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- j(equal, &is_data_object, Label::kNear);
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
- Immediate(kIsIndirectStringMask | kIsNotStringMask));
- j(not_zero, not_data_object, not_data_object_distance);
- bind(&is_data_object);
-}
-
-
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
@@ -5260,104 +5339,27 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
movp(rcx, addr_reg);
shrl(rcx, Immediate(kPointerSizeLog2));
andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
- movl(mask_reg, Immediate(1));
+ movl(mask_reg, Immediate(3));
shlp_cl(mask_reg);
}
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* value_is_white_and_not_data,
- Label::Distance distance) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register mask_scratch, Label* value_is_white,
+ Label::Distance distance) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- Label done;
-
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- j(not_zero, &done, Label::kNear);
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- Push(mask_scratch);
- // shl. May overflow making the check conservative.
- addp(mask_scratch, mask_scratch);
- testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- Pop(mask_scratch);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = rcx; // Holds map while checking type.
- Register length = rcx; // Holds length of object after checking type.
- Label not_heap_number;
- Label is_data_object;
-
- // Check for heap-number
- movp(map, FieldOperand(value, HeapObject::kMapOffset));
- CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- j(not_equal, &not_heap_number, Label::kNear);
- movp(length, Immediate(HeapNumber::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_heap_number);
- // Check for strings.
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = rcx;
- movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
- j(not_zero, value_is_white_and_not_data);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- Label not_external;
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
- DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
- testb(instance_type, Immediate(kExternalStringTag));
- j(zero, &not_external, Label::kNear);
- movp(length, Immediate(ExternalString::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_external);
- // Sequential string, either Latin1 or UC16.
- DCHECK(kOneByteStringTag == 0x04);
- andp(length, Immediate(kStringEncodingMask));
- xorp(length, Immediate(kStringEncodingMask));
- addp(length, Immediate(0x04));
- // Value now either 4 (if Latin1) or 8 (if UC16), i.e. char-size shifted by 2.
- imulp(length, FieldOperand(value, String::kLengthOffset));
- shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
- addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- andp(length, Immediate(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
-
- andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
- addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
-
- bind(&done);
+ j(zero, value_is_white, distance);
}
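
And the corresponding white test for contrast: white is the pattern 00, so checking that neither mark bit is set is enough (again an illustrative sketch, not code from this commit):
static bool IsWhite(uintptr_t bitmap_cell, int bit_index) {
  uintptr_t mask = uintptr_t{3} << bit_index;  // both mark bits of the object
  return (bitmap_cell & mask) == 0;            // neither bit set => white
}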