Diffstat (limited to 'deps/v8/src/s390/macro-assembler-s390.cc')
-rw-r--r--  deps/v8/src/s390/macro-assembler-s390.cc  472
1 file changed, 304 insertions(+), 168 deletions(-)
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index 21058f420f..8b708de734 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -70,10 +70,6 @@ void MacroAssembler::Call(Register target) {
Label start;
bind(&start);
- // Statement positions are expected to be recorded when the target
- // address is loaded.
- positions_recorder()->WriteRecordedPositions();
-
// Branch to target via indirect branch
basr(r14, target);
@@ -122,10 +118,6 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
bind(&start);
#endif
- // Statement positions are expected to be recorded when the target
- // address is loaded.
- positions_recorder()->WriteRecordedPositions();
-
mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode));
basr(r14, ip);
@@ -178,19 +170,7 @@ void MacroAssembler::Push(Handle<Object> handle) {
}
void MacroAssembler::Move(Register dst, Handle<Object> value) {
- AllowDeferredHandleDereference smi_check;
- if (value->IsSmi()) {
- LoadSmiLiteral(dst, reinterpret_cast<Smi*>(*value));
- } else {
- DCHECK(value->IsHeapObject());
- if (isolate()->heap()->InNewSpace(*value)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(value);
- mov(dst, Operand(cell));
- LoadP(dst, FieldMemOperand(dst, Cell::kValueOffset));
- } else {
- mov(dst, Operand(value));
- }
- }
+ mov(dst, Operand(value));
}
void MacroAssembler::Move(Register dst, Register src, Condition cond) {
@@ -480,8 +460,8 @@ void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
// Save caller-saved registers. js_function and code_entry are in the
// caller-saved register list.
DCHECK(kJSCallerSaved & js_function.bit());
- DCHECK(kJSCallerSaved & code_entry.bit());
- MultiPush(kJSCallerSaved | r14.bit());
+ // DCHECK(kJSCallerSaved & code_entry.bit());
+ MultiPush(kJSCallerSaved | code_entry.bit() | r14.bit());
int argument_count = 3;
PrepareCallCFunction(argument_count, code_entry);
@@ -499,7 +479,7 @@ void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
}
// Restore caller-saved registers (including js_function and code_entry).
- MultiPop(kJSCallerSaved | r14.bit());
+ MultiPop(kJSCallerSaved | code_entry.bit() | r14.bit());
bind(&done);
}
@@ -645,8 +625,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
// General purpose registers are pushed last on the stack.
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
@@ -677,12 +656,12 @@ void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
}
void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
- cefbr(dst, src);
+ cefbr(Condition(4), dst, src);
}
void MacroAssembler::ConvertUnsignedIntToFloat(Register src,
DoubleRegister dst) {
- celfbr(Condition(0), Condition(0), dst, src);
+ celfbr(Condition(4), Condition(0), dst, src);
}
#if V8_TARGET_ARCH_S390X
@@ -781,7 +760,7 @@ void MacroAssembler::ConvertFloat32ToInt32(const DoubleRegister double_input,
m = Condition(5);
break;
case kRoundToNearest:
- UNIMPLEMENTED();
+ m = Condition(4);
break;
case kRoundToPlusInf:
m = Condition(6);
@@ -794,6 +773,10 @@ void MacroAssembler::ConvertFloat32ToInt32(const DoubleRegister double_input,
break;
}
cfebr(m, dst, double_input);
+ Label done;
+ b(Condition(0xe), &done, Label::kNear); // special case
+ LoadImmP(dst, Operand::Zero());
+ bind(&done);
ldgr(double_dst, dst);
}
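
The guard added here (and mirrored in the unsigned variant below) keys off the z/Architecture convert-to-fixed condition codes; a hedged summary, as I read them:

//   CC 0: source zero   CC 1: result < 0   CC 2: result > 0
//   CC 3: special case (e.g. NaN or out-of-range input)
// Mask 0xe branches on CC 0|1|2, so only a special-case conversion falls
// through to LoadImmP(dst, Operand::Zero()), pinning the result to 0.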
@@ -819,6 +802,10 @@ void MacroAssembler::ConvertFloat32ToUnsignedInt32(
break;
}
clfebr(m, Condition(0), dst, double_input);
+ Label done;
+ b(Condition(0xe), &done, Label::kNear); // special case
+ LoadImmP(dst, Operand::Zero());
+ bind(&done);
ldgr(double_dst, dst);
}
@@ -987,9 +974,8 @@ void MacroAssembler::Prologue(bool code_pre_aging, Register base,
void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- LoadP(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
- LoadP(vector,
- FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+ LoadP(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
+ LoadP(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}
void MacroAssembler::EnterFrame(StackFrame::Type type,
@@ -1022,6 +1008,20 @@ int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
return frame_ends;
}
+void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
+ Register argc) {
+ CleanseP(r14);
+ Push(r14, fp, context, target);
+ la(fp, MemOperand(sp, 2 * kPointerSize));
+ Push(argc);
+}
+
+void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Pop(argc);
+ Pop(r14, fp, context, target);
+}
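+
+A sketch of the frame these new helpers maintain, assuming V8's s390 multi-register Push stores its first operand at the highest address:
+
+// After EnterBuiltinFrame (one slot per pointer, addresses descending):
+//   fp + kPointerSize   : saved r14 (return address)
+//   fp                  : caller's fp
+//   fp - kPointerSize   : context
+//   fp - 2*kPointerSize : target
+//   fp - 3*kPointerSize : argc      <- sp
+// LeaveBuiltinFrame unwinds the same five slots in reverse order.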
+
// ExitFrame layout (probably outdated; needs updating)
//
// SP -> previousSP
@@ -1046,7 +1046,10 @@ int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
// gaps
// Args
// ABIRes <- newSP
-void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
+ StackFrame::Type frame_type) {
+ DCHECK(frame_type == StackFrame::EXIT ||
+ frame_type == StackFrame::BUILTIN_EXIT);
// Set up the frame structure on the stack.
DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
@@ -1057,7 +1060,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// all of the pushes that have happened inside of V8
// since we were called from C code
CleanseP(r14);
- LoadSmiLiteral(r1, Smi::FromInt(StackFrame::EXIT));
+ LoadSmiLiteral(r1, Smi::FromInt(frame_type));
PushCommonFrame(r1);
// Reserve room for saved entry sp and code object.
lay(sp, MemOperand(fp, -ExitFrameConstants::kFixedFrameSizeFromFp));
@@ -1310,12 +1313,13 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_flooding;
- ExternalReference step_in_enabled =
- ExternalReference::debug_step_in_enabled_address(isolate());
- mov(r6, Operand(step_in_enabled));
- LoadlB(r6, MemOperand(r6));
- CmpP(r6, Operand::Zero());
- beq(&skip_flooding);
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(isolate());
+ STATIC_ASSERT(StepFrame > StepIn);
+ mov(r6, Operand(last_step_action));
+ LoadB(r6, MemOperand(r6));
+ CmpP(r6, Operand(StepIn));
+ blt(&skip_flooding);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
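
The rewritten check leans on the numeric ordering of the step-action enum, which the STATIC_ASSERT pins down. A sketch of the predicate, assuming the usual ordering where StepIn and StepFrame are the two largest values:

//   flood the function  iff  last_step_action >= StepIn
// so both StepIn and StepFrame trigger flooding, while any smaller
// action (or no stepping at all) takes the blt() to skip_flooding.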
@@ -1706,6 +1710,7 @@ void MacroAssembler::Allocate(int object_size, Register result,
Register scratch1, Register scratch2,
Label* gc_required, AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1767,7 +1772,7 @@ void MacroAssembler::Allocate(int object_size, Register result,
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
AndP(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
- beq(&aligned);
+ beq(&aligned, Label::kNear);
if ((flags & PRETENURE) != 0) {
CmpLogicalP(result, alloc_limit);
bge(gc_required);
@@ -1792,17 +1797,20 @@ void MacroAssembler::Allocate(int object_size, Register result,
blt(gc_required);
AddP(result_end, result, result_end);
}
- StoreP(result_end, MemOperand(top_address));
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- AddP(result, result, Operand(kHeapObjectTag));
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ StoreP(result_end, MemOperand(top_address));
}
+
+ // Tag object.
+ AddP(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::Allocate(Register object_size, Register result,
Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1860,7 +1868,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
AndP(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
- beq(&aligned);
+ beq(&aligned, Label::kNear);
if ((flags & PRETENURE) != 0) {
CmpLogicalP(result, alloc_limit);
bge(gc_required);
@@ -1892,12 +1900,114 @@ void MacroAssembler::Allocate(Register object_size, Register result,
AndP(r0, result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace, cr0);
}
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ StoreP(result_end, MemOperand(top_address));
+ }
+
+ // Tag object.
+ AddP(result, result, Operand(kHeapObjectTag));
+}
+
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ AllocationFlags flags) {
+ // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+ // is not specified. Other registers must not overlap.
+ DCHECK(!AreAliased(object_size, result, scratch, ip));
+ DCHECK(!AreAliased(result_end, result, scratch, ip));
+ DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
+
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ Register top_address = scratch;
+ mov(top_address, Operand(allocation_top));
+ LoadP(result, MemOperand(top_address));
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+// Align the next allocation. Storing the filler map without checking top is
+// safe in new-space because the limit of the heap is aligned there.
+#if V8_TARGET_ARCH_S390X
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+#else
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ AndP(result_end, result, Operand(kDoubleAlignmentMask));
+ Label aligned;
+ beq(&aligned, Label::kNear);
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ StoreW(result_end, MemOperand(result));
+ AddP(result, result, Operand(kDoubleSize / 2));
+ bind(&aligned);
+#endif
+ }
+
+ // Calculate new top using result. Object size may be in words so a shift is
+ // required to get the number of bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ ShiftLeftP(result_end, object_size, Operand(kPointerSizeLog2));
+ AddP(result_end, result, result_end);
+ } else {
+ AddP(result_end, result, object_size);
+ }
+
+ // Update allocation top. result temporarily holds the new top.
+ if (emit_debug_code()) {
+ AndP(r0, result_end, Operand(kObjectAlignmentMask));
+ Check(eq, kUnalignedAllocationInNewSpace, cr0);
+ }
StoreP(result_end, MemOperand(top_address));
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- AddP(result, result, Operand(kHeapObjectTag));
+ // Tag object.
+ AddP(result, result, Operand(kHeapObjectTag));
+}
+
+void MacroAssembler::FastAllocate(int object_size, Register result,
+ Register scratch1, Register scratch2,
+ AllocationFlags flags) {
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(!AreAliased(result, scratch1, scratch2, ip));
+
+ // Make object size into bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ object_size *= kPointerSize;
}
+ DCHECK_EQ(0, object_size & kObjectAlignmentMask);
+
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ // Set up allocation top address register.
+ Register top_address = scratch1;
+ Register result_end = scratch2;
+ mov(top_address, Operand(allocation_top));
+ LoadP(result, MemOperand(top_address));
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+// Align the next allocation. Storing the filler map without checking top is
+// safe in new-space because the limit of the heap is aligned there.
+#if V8_TARGET_ARCH_S390X
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+#else
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ AndP(result_end, result, Operand(kDoubleAlignmentMask));
+ Label aligned;
+ beq(&aligned, Label::kNear);
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ StoreW(result_end, MemOperand(result));
+ AddP(result, result, Operand(kDoubleSize / 2));
+ bind(&aligned);
+#endif
+ }
+
+ // Calculate new top using result.
+ AddP(result_end, result, Operand(object_size));
+
+ // FastAllocate is never an allocation folding dominator, so the top
+ // pointer is always updated here.
+ StoreP(result_end, MemOperand(top_address));
+
+ // Tag object.
+ AddP(result, result, Operand(kHeapObjectTag));
}
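
Pulling the allocation-folding pieces together, the contract implied by this hunk seems to be:

// - Allocate(..., ALLOCATION_FOLDING_DOMINATOR): checks the limit but
//   deliberately leaves the allocation top untouched.
// - FastAllocate(...): bumps the top with no limit check, which is only
//   safe when a dominating allocation has already performed it.
// - DCHECK((flags & ALLOCATION_FOLDED) == 0) in both Allocate overloads:
//   folded allocations must come through FastAllocate instead.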
void MacroAssembler::AllocateTwoByteString(Register result, Register length,
@@ -1914,7 +2024,8 @@ void MacroAssembler::AllocateTwoByteString(Register result, Register length,
AndP(scratch1, Operand(~kObjectAlignmentMask));
// Allocate two-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
+ Allocate(scratch1, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
@@ -1934,7 +2045,8 @@ void MacroAssembler::AllocateOneByteString(Register result, Register length,
AndP(scratch1, Operand(~kObjectAlignmentMask));
// Allocate one-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
+ Allocate(scratch1, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
@@ -1946,7 +2058,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
scratch2);
@@ -1957,7 +2069,7 @@ void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
scratch1, scratch2);
@@ -1969,7 +2081,7 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
scratch2);
@@ -1981,7 +2093,7 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
scratch1, scratch2);
@@ -2076,89 +2188,6 @@ void MacroAssembler::StoreNumberToDoubleElements(
FixedDoubleArray::kHeaderSize - elements_offset));
}
-void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
- Register right,
- Register overflow_dst,
- Register scratch) {
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!dst.is(scratch));
- DCHECK(!overflow_dst.is(scratch));
- DCHECK(!overflow_dst.is(left));
- DCHECK(!overflow_dst.is(right));
-
- // TODO(joransiu): Optimize paths for left == right.
- bool left_is_right = left.is(right);
-
- // C = A+B; C overflows if A/B have same sign and C has diff sign than A
- if (dst.is(left)) {
- LoadRR(scratch, left); // Preserve left.
- AddP(dst, left, right); // Left is overwritten.
- XorP(overflow_dst, scratch, dst); // Original left.
- if (!left_is_right) XorP(scratch, dst, right);
- } else if (dst.is(right)) {
- LoadRR(scratch, right); // Preserve right.
- AddP(dst, left, right); // Right is overwritten.
- XorP(overflow_dst, dst, left);
- if (!left_is_right) XorP(scratch, dst, scratch);
- } else {
- AddP(dst, left, right);
- XorP(overflow_dst, dst, left);
- if (!left_is_right) XorP(scratch, dst, right);
- }
- if (!left_is_right) AndP(overflow_dst, scratch, overflow_dst);
- LoadAndTestRR(overflow_dst, overflow_dst);
-}
-
-void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
- intptr_t right,
- Register overflow_dst,
- Register scratch) {
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!dst.is(scratch));
- DCHECK(!overflow_dst.is(scratch));
- DCHECK(!overflow_dst.is(left));
-
- mov(r1, Operand(right));
- AddAndCheckForOverflow(dst, left, r1, overflow_dst, scratch);
-}
-
-void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left,
- Register right,
- Register overflow_dst,
- Register scratch) {
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!dst.is(scratch));
- DCHECK(!overflow_dst.is(scratch));
- DCHECK(!overflow_dst.is(left));
- DCHECK(!overflow_dst.is(right));
-
- // C = A-B; C overflows if A/B have diff signs and C has diff sign than A
- if (dst.is(left)) {
- LoadRR(scratch, left); // Preserve left.
- SubP(dst, left, right); // Left is overwritten.
- XorP(overflow_dst, dst, scratch);
- XorP(scratch, right);
- AndP(overflow_dst, scratch /*, SetRC*/);
- LoadAndTestRR(overflow_dst, overflow_dst);
- // Should be okay to remove rc
- } else if (dst.is(right)) {
- LoadRR(scratch, right); // Preserve right.
- SubP(dst, left, right); // Right is overwritten.
- XorP(overflow_dst, dst, left);
- XorP(scratch, left);
- AndP(overflow_dst, scratch /*, SetRC*/);
- LoadAndTestRR(overflow_dst, overflow_dst);
- // Should be okay to remove rc
- } else {
- SubP(dst, left, right);
- XorP(overflow_dst, dst, left);
- XorP(scratch, left, right);
- AndP(overflow_dst, scratch /*, SetRC*/);
- LoadAndTestRR(overflow_dst, overflow_dst);
- // Should be okay to remove rc
- }
-}
-
void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
Label* early_success) {
LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
@@ -2527,9 +2556,11 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
JumpToExternalReference(ExternalReference(fid, isolate()));
}
-void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
+ bool builtin_exit_frame) {
mov(r3, Operand(builtin));
- CEntryStub stub(isolate(), 1);
+ CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
+ builtin_exit_frame);
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -2613,16 +2644,19 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- LoadSmiLiteral(r0, Smi::FromInt(reason));
- push(r0);
+ // Check if Abort() has already been initialized.
+ DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
+
+ LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(reason)));
+
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
} else {
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
}
// will not return here
}
@@ -2847,6 +2881,18 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
+void MacroAssembler::AssertGeneratorObject(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object);
+ Check(ne, kOperandIsASmiAndNotAGeneratorObject, cr0);
+ push(object);
+ CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
+ pop(object);
+ Check(eq, kOperandIsNotAGeneratorObject);
+ }
+}
+
void MacroAssembler::AssertReceiver(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -2935,12 +2981,11 @@ void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
Register scratch2,
Register heap_number_map,
Label* gc_required,
- TaggingMode tagging_mode,
MutableMode mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
- tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+ NO_ALLOCATION_FLAGS);
Heap::RootListIndex map_index = mode == MUTABLE
? Heap::kMutableHeapNumberMapRootIndex
@@ -2948,11 +2993,7 @@ void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
AssertIsRoot(heap_number_map, map_index);
// Store heap number map in the allocated object.
- if (tagging_mode == TAG_RESULT) {
StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
- } else {
- StoreP(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
- }
}
void MacroAssembler::AllocateHeapNumberWithValue(
@@ -2971,7 +3012,8 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
DCHECK(!result.is(value));
// Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
// Initialize the JSValue.
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
@@ -3539,7 +3581,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
Label* no_memento_found) {
Label map_check;
Label top_check;
- ExternalReference new_space_allocation_top =
+ ExternalReference new_space_allocation_top_adr =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
@@ -3550,11 +3592,13 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
- AddP(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
- XorP(r0, scratch_reg, Operand(new_space_allocation_top));
+ AddP(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ mov(ip, Operand(new_space_allocation_top_adr));
+ LoadP(ip, MemOperand(ip));
+ XorP(r0, scratch_reg, ip);
AndP(r0, r0, Operand(~Page::kPageAlignmentMask));
beq(&top_check, Label::kNear);
// The object is on a different page than allocation top. Bail out if the
@@ -3568,7 +3612,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
// If top is on the same page as the current object, we need to check whether
// we are below top.
bind(&top_check);
- CmpP(scratch_reg, Operand(new_space_allocation_top));
+ CmpP(scratch_reg, ip);
bgt(no_memento_found);
// Memento map check.
bind(&map_check);
@@ -3587,8 +3631,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
Register candidate = Register::from_code(code);
@@ -3654,6 +3697,36 @@ void MacroAssembler::mov(Register dst, const Operand& src) {
#endif
}
+void MacroAssembler::Mul32(Register dst, const MemOperand& src1) {
+ if (is_uint12(src1.offset())) {
+ ms(dst, src1);
+ } else if (is_int20(src1.offset())) {
+ msy(dst, src1);
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
+void MacroAssembler::Mul32(Register dst, Register src1) { msr(dst, src1); }
+
+void MacroAssembler::Mul32(Register dst, const Operand& src1) {
+ msfi(dst, src1);
+}
+
+void MacroAssembler::Mul64(Register dst, const MemOperand& src1) {
+ if (is_int20(src1.offset())) {
+ msg(dst, src1);
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
+void MacroAssembler::Mul64(Register dst, Register src1) { msgr(dst, src1); }
+
+void MacroAssembler::Mul64(Register dst, const Operand& src1) {
+ msgfi(dst, src1);
+}
+
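
The offset guards in the new multiply helpers follow the instruction encodings; for reference (per the z/Architecture formats):

//   ms   (RX form)  : 12-bit unsigned displacement, offsets 0..4095
//   msy  (RXY form) : 20-bit signed displacement, -524288..524287
//   msg  (RXY form) : 20-bit signed displacement (64-bit multiply)
// Anything wider would need the address materialized in a register,
// hence the UNIMPLEMENTED() fallthroughs.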
void MacroAssembler::Mul(Register dst, Register src1, Register src2) {
if (dst.is(src2)) {
MulP(dst, src1);
@@ -4033,15 +4106,18 @@ void MacroAssembler::SubP_ExtendSrc(Register dst, Register src) {
// Subtract 32-bit (Register = Register - Register)
void MacroAssembler::Sub32(Register dst, Register src1, Register src2) {
// Use non-clobbering version if possible
- if (CpuFeatures::IsSupported(DISTINCT_OPS) && !dst.is(src1)) {
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
srk(dst, src1, src2);
return;
}
if (!dst.is(src1) && !dst.is(src2)) lr(dst, src1);
// In scenario where we have dst = src - dst, we need to swap and negate
if (!dst.is(src1) && dst.is(src2)) {
- sr(dst, src1); // dst = (dst - src)
+ Label done;
lcr(dst, dst); // dst = -dst
+ b(overflow, &done);
+ ar(dst, src1); // dst = dst + src
+ bind(&done);
} else {
sr(dst, src2);
}
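
On the reordering in the dst == src2 path: the old sequence subtracted first and negated last, so the final condition code reflected only the lcr. The new sequence negates first, and the overflow branch guards the one input where negation itself overflows. A hedged reading:

//   lcr(dst, dst);       // dst = -dst; overflows only for INT_MIN
//   b(overflow, &done);  // keep the overflow CC rather than masking it
//   ar(dst, src1);       // otherwise dst = src1 - dst_original, CC from add
// SubP below applies the same transformation at pointer width.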
@@ -4050,15 +4126,18 @@ void MacroAssembler::Sub32(Register dst, Register src1, Register src2) {
// Subtract Pointer Sized (Register = Register - Register)
void MacroAssembler::SubP(Register dst, Register src1, Register src2) {
// Use non-clobbering version if possible
- if (CpuFeatures::IsSupported(DISTINCT_OPS) && !dst.is(src1)) {
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
SubP_RRR(dst, src1, src2);
return;
}
if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1);
// In scenario where we have dst = src - dst, we need to swap and negate
if (!dst.is(src1) && dst.is(src2)) {
- SubP(dst, src1); // dst = (dst - src)
+ Label done;
LoadComplementRR(dst, dst); // dst = -dst
+ b(overflow, &done);
+ AddP(dst, src1); // dst = dst + src
+ bind(&done);
} else {
SubP(dst, src2);
}
@@ -4076,8 +4155,8 @@ void MacroAssembler::SubP_ExtendSrc(Register dst, Register src1,
// In scenario where we have dst = src - dst, we need to swap and negate
if (!dst.is(src1) && dst.is(src2)) {
lgfr(dst, dst); // Sign extend this operand first.
- SubP(dst, src1); // dst = (dst - src)
LoadComplementRR(dst, dst); // dst = -dst
+ AddP(dst, src1); // dst = -dst + src
} else {
sgfr(dst, src2);
}
@@ -4459,12 +4538,22 @@ void MacroAssembler::XorP(Register dst, Register src, const Operand& opnd) {
XorP(dst, opnd);
}
-void MacroAssembler::NotP(Register dst) {
-#if V8_TARGET_ARCH_S390X
+void MacroAssembler::Not32(Register dst, Register src) {
+ if (!src.is(no_reg) && !src.is(dst)) lr(dst, src);
+ xilf(dst, Operand(0xFFFFFFFF));
+}
+
+void MacroAssembler::Not64(Register dst, Register src) {
+ if (!src.is(no_reg) && !src.is(dst)) lgr(dst, src);
xihf(dst, Operand(0xFFFFFFFF));
xilf(dst, Operand(0xFFFFFFFF));
+}
+
+void MacroAssembler::NotP(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+ Not64(dst, src);
#else
- XorP(dst, Operand(0xFFFFFFFF));
+ Not32(dst, src);
#endif
}
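
The complement helpers are now explicit about width; a quick usage sketch, using z/Architecture bit numbering (bits 0..31 are the high word):

//   Not32(r2, r3): r2 = ~r3 in the low 32 bits (xilf flips bits 32..63);
//                  the high word is left untouched.
//   Not64(r2, r3): r2 = ~r3 across all 64 bits (xihf + xilf).
//   NotP:          picks whichever matches the build's pointer width.
// Passing src == no_reg (or src == dst) complements dst in place.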
@@ -4639,7 +4728,6 @@ void MacroAssembler::Branch(Condition c, const Operand& opnd) {
// Branch On Count. Decrement R1, and branch if R1 != 0.
void MacroAssembler::BranchOnCount(Register r1, Label* l) {
int32_t offset = branch_offset(l);
- positions_recorder()->WriteRecordedPositions();
if (is_int16(offset)) {
#if V8_TARGET_ARCH_S390X
brctg(r1, Operand(offset));
@@ -4953,6 +5041,22 @@ void MacroAssembler::LoadlW(Register dst, const MemOperand& mem,
#endif
}
+void MacroAssembler::LoadLogicalHalfWordP(Register dst, const MemOperand& mem) {
+#if V8_TARGET_ARCH_S390X
+ llgh(dst, mem);
+#else
+ llh(dst, mem);
+#endif
+}
+
+void MacroAssembler::LoadLogicalHalfWordP(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+ llghr(dst, src);
+#else
+ llhr(dst, src);
+#endif
+}
+
void MacroAssembler::LoadB(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
lgb(dst, mem);
@@ -4961,6 +5065,14 @@ void MacroAssembler::LoadB(Register dst, const MemOperand& mem) {
#endif
}
+void MacroAssembler::LoadB(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+ lgbr(dst, src);
+#else
+ lbr(dst, src);
+#endif
+}
+
void MacroAssembler::LoadlB(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
llgc(dst, mem);
@@ -4969,6 +5081,20 @@ void MacroAssembler::LoadlB(Register dst, const MemOperand& mem) {
#endif
}
+void MacroAssembler::LoadLogicalReversedWordP(Register dst,
+ const MemOperand& mem) {
+ lrv(dst, mem);
+ LoadlW(dst, dst);
+}
+
+
+void MacroAssembler::LoadLogicalReversedHalfWordP(Register dst,
+ const MemOperand& mem) {
+ lrvh(dst, mem);
+ LoadLogicalHalfWordP(dst, dst);
+}
+
+
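
The trailing zero-extensions here look deliberate: the byte-reversed loads only define the low bits of the register, so each helper normalizes to pointer width afterwards:

//   LoadLogicalReversedWordP:     lrv  writes the low 32 bits only;
//                                 LoadlW zero-extends them to pointer size.
//   LoadLogicalReversedHalfWordP: lrvh writes the low 16 bits only;
//                                 LoadLogicalHalfWordP zero-extends them.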
// Load And Test (Reg <- Reg)
void MacroAssembler::LoadAndTest32(Register dst, Register src) {
ltr(dst, src);
@@ -5009,6 +5135,16 @@ void MacroAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
#endif
}
+// Load On Condition Pointer Sized (Reg <- Reg)
+void MacroAssembler::LoadOnConditionP(Condition cond, Register dst,
+ Register src) {
+#if V8_TARGET_ARCH_S390X
+ locgr(cond, dst, src);
+#else
+ locr(cond, dst, src);
+#endif
+}
+
// Load Double Precision (64-bit) Floating Point number from memory
void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem) {
// for 32bit and 64bit we all use 64bit floating point regs
@@ -5300,7 +5436,7 @@ void MacroAssembler::Popcnt32(Register dst, Register src) {
ar(dst, r0);
ShiftRight(r0, dst, Operand(8));
ar(dst, r0);
- lbr(dst, dst);
+ LoadB(dst, dst);
}
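
The switch from lbr to LoadB at the end of the popcount sequences reads as a width fix: LoadB expands to lgbr on 64-bit targets, so the upper bits become well defined. A note on why the sign-extension is harmless:

//   After the shift/add tree, dst's low byte holds popcount(src) <= 64,
//   so LoadB's sign-extension can never propagate a 1 into the upper bits.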
#ifdef V8_TARGET_ARCH_S390X
@@ -5315,7 +5451,7 @@ void MacroAssembler::Popcnt64(Register dst, Register src) {
AddP(dst, r0);
ShiftRightP(r0, dst, Operand(8));
AddP(dst, r0);
- lbr(dst, dst);
+ LoadB(dst, dst);
}
#endif