Diffstat (limited to 'deps/v8/src/ppc/macro-assembler-ppc.cc')
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.cc | 108
1 file changed, 23 insertions(+), 85 deletions(-)
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 13e04a2c8c..5605907d6f 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -20,6 +20,7 @@
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/snapshot.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/ppc/macro-assembler-ppc.h"
@@ -196,6 +197,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
// Use ip directly instead of using UseScratchRegisterScope, as we do
@@ -212,18 +214,11 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Jump(static_cast<intptr_t>(code.address()), rmode, cond, cr);
}
-int TurboAssembler::CallSize(Register target) { return 2 * kInstrSize; }
-
void TurboAssembler::Call(Register target) {
BlockTrampolinePoolScope block_trampoline_pool(this);
- Label start;
- bind(&start);
-
// branch via link register and set LK bit for return point
mtctr(target);
bctrl();
-
- DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::CallJSEntry(Register target) {
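With the simplified Call(Register) above, the emitted sequence is always the fixed two-instruction pair mtctr/bctrl, so CallSize(Register) carried no information and the size DCHECK is dropped. A caller that still needs the emitted size can measure it directly; a minimal sketch using only the Label and SizeOfCodeGeneratedSince APIs already visible in this diff:

    Label start;
    bind(&start);
    Call(target);                                 // mtctr target; bctrl
    int size = SizeOfCodeGeneratedSince(&start);  // 2 * kInstrSize here
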
@@ -231,12 +226,6 @@ void MacroAssembler::CallJSEntry(Register target) {
Call(target);
}
-int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
- Condition cond) {
- Operand mov_operand = Operand(target, rmode);
- return (2 + instructions_required_for_mov(ip, mov_operand)) * kInstrSize;
-}
-
int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
RelocInfo::Mode rmode,
Condition cond) {
@@ -248,13 +237,6 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(cond == al);
-#ifdef DEBUG
- // Check the expected size before generating code to ensure we assume the same
- // constant pool availability (e.g., whether constant pool is full or not).
- int expected_size = CallSize(target, rmode, cond);
- Label start;
- bind(&start);
-#endif
// This can likely be optimized to make use of bc() with 24bit relative
//
// RecordRelocInfo(x.rmode_, x.immediate);
@@ -264,13 +246,6 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
mov(ip, Operand(target, rmode));
mtctr(ip);
bctrl();
-
- DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
-}
-
-int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond) {
- return CallSize(code.address(), rmode, cond);
}
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
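The deleted CallSize(Address) derived its result from instructions_required_for_mov, which changes with constant pool availability, so predicting the size before emission was fragile; measuring after emission, as sketched above, is robust. CallSizeNotPredictableCodeSize survives for callers that need an upper bound up front; a hedged usage sketch, assuming it remains a static helper whose result ignores constant pool state as its name suggests:

    int reserved = MacroAssembler::CallSizeNotPredictableCodeSize(
        target, RelocInfo::CODE_TARGET, al);  // pool-independent worst case
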
@@ -294,6 +269,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
DCHECK(Builtins::IsBuiltinId(builtin_index));
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
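RecordCommentForOffHeapTrampoline is added on both the Jump and the Call paths so that inlined off-heap trampolines remain identifiable in disassembly. A sketch of what such a helper plausibly does (hypothetical body; the real one lives in shared TurboAssembler code):

    void TurboAssembler::RecordCommentForOffHeapTrampoline(int builtin_index) {
      if (!FLAG_code_comments) return;  // only useful with code comments on
      std::ostringstream str;           // needs <sstream>
      str << "[ Inlined Trampoline to " << Builtins::name(builtin_index);
      RecordComment(str.str().c_str());
    }
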
@@ -933,11 +909,10 @@ void TurboAssembler::LoadPC(Register dst) {
}
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
- Label current_pc;
- mov_label_addr(dst, &current_pc);
-
- bind(&current_pc);
- subi(dst, dst, Operand(pc_offset()));
+ mflr(r0);
+ LoadPC(dst);
+ subi(dst, dst, Operand(pc_offset() - kInstrSize));
+ mtlr(r0);
}
void TurboAssembler::LoadConstantPoolPointerRegister() {
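The new ComputeCodeStartAddress relies on LoadPC, which on PPC is assumed to be a branch-and-link pair and therefore clobbers the link register; that is why LR is parked in r0 around it. After LoadPC, dst holds the address of its second instruction, i.e. code start plus pc_offset() - kInstrSize, which the subi cancels out. The assumed LoadPC shape:

    void TurboAssembler::LoadPC(Register dst) {
      b(4, SetLK);  // branch to the next instruction, LR <- its address
      mflr(dst);    // dst <- LR, i.e. the address of this mflr
    }
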
@@ -1789,6 +1764,18 @@ void TurboAssembler::Abort(AbortReason reason) {
return;
}
+ if (should_abort_hard()) {
+ // We don't care if we constructed a frame. Just pretend we did.
+ FrameScope assume_frame(this, StackFrame::NONE);
+ mov(r3, Operand(static_cast<int>(reason)));
+ PrepareCallCFunction(1, 0, r4);
+ Move(ip, ExternalReference::abort_with_reason());
+ // Use Call directly to avoid any unneeded overhead. The function won't
+ // return anyway.
+ Call(ip);
+ return;
+ }
+
LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(reason)));
// Disable stub call restrictions to always allow calls to abort.
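In the hard-abort path the reason travels as an ordinary C argument: under the PPC ABI the first integer argument goes in r3, PrepareCallCFunction sets up the C frame, and Move/Call jump straight into the runtime with no expectation of returning. A sketch of the C entry this is assumed to reach (hypothetical body, named after the external reference; the real V8 declaration may differ):

    #include <cstdio>
    #include <cstdlib>

    extern "C" void abort_with_reason(int reason) {
      std::fprintf(stderr, "aborting: reason %d\n", reason);
      std::abort();  // does not return, as the generated code assumes
    }
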
@@ -2910,8 +2897,10 @@ void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
}
void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
- if (dst.ra() != r0) DCHECK(!AreAliased(src, dst.ra(), scratch));
- if (dst.rb() != r0) DCHECK(!AreAliased(src, dst.rb(), scratch));
+ if (dst.ra() != r0 && dst.ra().is_valid())
+ DCHECK(!AreAliased(src, dst.ra(), scratch));
+ if (dst.rb() != r0 && dst.rb().is_valid())
+ DCHECK(!AreAliased(src, dst.rb(), scratch));
DCHECK(!AreAliased(src, scratch));
mr(scratch, src);
LoadP(src, dst, r0);
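The added is_valid() checks matter because a MemOperand built from a base register and an immediate offset is assumed to leave its index register rb() as no_reg, and the shared replacement for the AreAliased overloads deleted below no longer skips invalid registers. A sketch of the guarded case:

    MemOperand slot(sp, 8);  // offset form: ra() == sp, rb() invalid
    // rb() != r0 holds for no_reg too, so before this patch the invalid
    // register reached AreAliased; the is_valid() test now filters it out.
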
@@ -3004,57 +2993,6 @@ void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
StoreDouble(scratch_1, src, r0);
}
-#ifdef DEBUG
-bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
- Register reg5, Register reg6, Register reg7, Register reg8,
- Register reg9, Register reg10) {
- int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
- reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
- reg10.is_valid();
-
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
- if (reg7.is_valid()) regs |= reg7.bit();
- if (reg8.is_valid()) regs |= reg8.bit();
- if (reg9.is_valid()) regs |= reg9.bit();
- if (reg10.is_valid()) regs |= reg10.bit();
- int n_of_non_aliasing_regs = NumRegs(regs);
-
- return n_of_valid_regs != n_of_non_aliasing_regs;
-}
-
-bool AreAliased(DoubleRegister reg1, DoubleRegister reg2, DoubleRegister reg3,
- DoubleRegister reg4, DoubleRegister reg5, DoubleRegister reg6,
- DoubleRegister reg7, DoubleRegister reg8, DoubleRegister reg9,
- DoubleRegister reg10) {
- int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
- reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
- reg10.is_valid();
-
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
- if (reg7.is_valid()) regs |= reg7.bit();
- if (reg8.is_valid()) regs |= reg8.bit();
- if (reg9.is_valid()) regs |= reg9.bit();
- if (reg10.is_valid()) regs |= reg10.bit();
- int n_of_non_aliasing_regs = NumRegs(regs);
-
- return n_of_valid_regs != n_of_non_aliasing_regs;
-}
-#endif
-
void TurboAssembler::ResetSpeculationPoisonRegister() {
mov(kSpeculationPoisonRegister, Operand(-1));
}
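
The DEBUG-only AreAliased overloads removed above duplicated the same counting logic for Register and DoubleRegister, and every port carried its own copy. They are presumably superseded by one shared variadic template along these lines (a sketch of the idea, not the exact V8 declaration):

    template <typename RegType, typename... RegTypes>
    inline bool AreAliased(RegType first_reg, RegTypes... regs) {
      int num_different_regs = NumRegs(RegType::ListOf(first_reg, regs...));
      int num_given_regs = sizeof...(regs) + 1;
      return num_different_regs < num_given_regs;  // duplicates collapse
    }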