author     Michaël Zasso <targos@protonmail.com>  2017-02-14 11:27:26 +0100
committer  Michaël Zasso <targos@protonmail.com>  2017-02-22 15:55:42 +0100
commit     7a77daf24344db7942e34c962b0f1ee729ab7af5 (patch)
tree       e7cbe7bf4e2f4b802a8f5bc18336c546cd6a0d7f /deps/v8/src/ppc
parent     5f08871ee93ea739148cc49e0f7679e33c70295a (diff)
download   android-node-v8-7a77daf24344db7942e34c962b0f1ee729ab7af5.tar.gz
           android-node-v8-7a77daf24344db7942e34c962b0f1ee729ab7af5.tar.bz2
           android-node-v8-7a77daf24344db7942e34c962b0f1ee729ab7af5.zip
deps: update V8 to 5.6.326.55
PR-URL: https://github.com/nodejs/node/pull/10992
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Diffstat (limited to 'deps/v8/src/ppc')
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.h               |   3
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.cc             | 453
-rw-r--r--  deps/v8/src/ppc/interface-descriptors-ppc.cc  |  15
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.cc        | 268
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.h         |  28
5 files changed, 63 insertions(+), 704 deletions(-)
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 7843e2e07d..f49ac6305e 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -1216,7 +1216,8 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
+ void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
+ int id);
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.
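Note: the new overload replaces a raw integer position with a typed SourcePosition value, so callers hand over the inlining context along with the script offset. A minimal illustrative sketch of such a value type (not V8's actual definition, which packs these fields into a bitfield):

  // Illustrative only: carries what the old "int raw_position" did, plus
  // the inlining context that the typed API can now express.
  class SourcePosition {
   public:
    explicit SourcePosition(int script_offset, int inlining_id = -1)
        : script_offset_(script_offset), inlining_id_(inlining_id) {}
    int ScriptOffset() const { return script_offset_; }
    int InliningId() const { return inlining_id_; }  // -1: not inlined

   private:
    int script_offset_;  // character offset into the script source
    int inlining_id_;    // which inlined function, if any
  };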
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index ce423ea53a..a48fc06116 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -561,7 +561,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
__ and_(r5, lhs, rhs);
__ JumpIfNotSmi(r5, &not_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
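Note: the Smi::kZero rename relies on V8's pointer-tagging scheme, the same scheme that powers the and_/JumpIfNotSmi combination above. A minimal sketch, assuming the usual tags (kSmiTag == 0 in the low bit, heap objects tagged with 1):

  #include <cstdint>

  constexpr intptr_t kSmiTagMask = 1;  // low bit: 0 = Smi, 1 = heap object

  bool IsSmi(intptr_t tagged) { return (tagged & kSmiTagMask) == 0; }

  // Smi::FromInt(0) shifts 0 into the payload bits, so its bit pattern is
  // the all-zero word -- the same value as static_cast<Smi*>(0), which is
  // why the named constant Smi::kZero can stand in for both spellings.

  // The AND trick: the low bit of (lhs & rhs) is 1 only when *both*
  // operands are heap objects, so JumpIfNotSmi on the AND falls through
  // exactly when at least one operand is a Smi.
  bool AtLeastOneSmi(intptr_t lhs, intptr_t rhs) { return IsSmi(lhs & rhs); }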
@@ -1576,13 +1576,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ SmiToShortArrayOffset(r4, r4);
__ addi(r4, r4, Operand(2));
- __ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset));
- __ JumpIfSmi(r3, &runtime);
- __ CompareObjectType(r3, r5, r5, JS_OBJECT_TYPE);
- __ bne(&runtime);
+ // Check that the last match info is a FixedArray.
+ __ LoadP(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
+ __ JumpIfSmi(last_match_info_elements, &runtime);
// Check that the object has fast elements.
- __ LoadP(last_match_info_elements,
- FieldMemOperand(r3, JSArray::kElementsOffset));
__ LoadP(r3,
FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
__ CompareRoot(r3, Heap::kFixedArrayMapRootIndex);
@@ -1591,7 +1588,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// additional information.
__ LoadP(
r3, FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
- __ addi(r5, r4, Operand(RegExpImpl::kLastMatchOverhead));
+ __ addi(r5, r4, Operand(RegExpMatchInfo::kLastMatchOverhead));
__ SmiUntag(r0, r3);
__ cmp(r5, r0);
__ bgt(&runtime);
@@ -1601,21 +1598,23 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Store the capture count.
__ SmiTag(r5, r4);
__ StoreP(r5, FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastCaptureCountOffset),
+ RegExpMatchInfo::kNumberOfCapturesOffset),
r0);
// Store last subject and last input.
__ StoreP(subject, FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset),
+ RegExpMatchInfo::kLastSubjectOffset),
r0);
__ mr(r5, subject);
- __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastSubjectOffset,
- subject, r10, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpMatchInfo::kLastSubjectOffset, subject, r10,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
__ mr(subject, r5);
__ StoreP(subject, FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastInputOffset),
+ RegExpMatchInfo::kLastInputOffset),
r0);
- __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastInputOffset,
- subject, r10, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpMatchInfo::kLastInputOffset, subject, r10,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@@ -1626,10 +1625,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// r5: offsets vector
Label next_capture;
// Capture register counter starts from number of capture registers and
- // counts down until wraping after zero.
- __ addi(
- r3, last_match_info_elements,
- Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag - kPointerSize));
+ // counts down until wrapping after zero.
+ __ addi(r3, last_match_info_elements,
+ Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag -
+ kPointerSize));
__ addi(r5, r5, Operand(-kIntSize)); // bias down for lwzu
__ mtctr(r4);
__ bind(&next_capture);
@@ -1641,7 +1640,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bdnz(&next_capture);
// Return last match info.
- __ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset));
+ __ mr(r3, last_match_info_elements);
__ addi(sp, sp, Operand(4 * kPointerSize));
__ Ret();
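Note: these hunks switch the stub from indexing RegExpImpl offsets off a JSArray's elements to writing directly into a RegExpMatchInfo object. A sketch of the layout the new offsets imply, assuming a FixedArray-style header (map plus length) and 64-bit pointers; the authoritative values live in V8's regexp headers:

  constexpr int kPointerSize = 8;
  constexpr int kHeapObjectTag = 1;  // tagged pointers point 1 past the start
  constexpr int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length

  // Slot layout of a RegExpMatchInfo (illustrative indices):
  constexpr int kNumberOfCapturesOffset = kFixedArrayHeaderSize + 0 * kPointerSize;
  constexpr int kLastSubjectOffset      = kFixedArrayHeaderSize + 1 * kPointerSize;
  constexpr int kLastInputOffset        = kFixedArrayHeaderSize + 2 * kPointerSize;
  constexpr int kFirstCaptureOffset     = kFixedArrayHeaderSize + 3 * kPointerSize;

  // The addi above forms an untagged base for the capture-store loop:
  // kFirstCaptureOffset - kHeapObjectTag - kPointerSize, where the extra
  // kPointerSize biases down for the store-with-update in the loop body.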
@@ -1873,6 +1872,7 @@ static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
}
void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
+ // r3 - number of arguments
// r4 - function
// r6 - slot id
// r5 - vector
@@ -1881,25 +1881,22 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ cmp(r4, r8);
__ bne(miss);
- __ mov(r3, Operand(arg_count()));
-
// Increment the call count for monomorphic function calls.
IncrementCallCount(masm, r5, r6, r0);
__ mr(r5, r7);
__ mr(r6, r4);
- ArrayConstructorStub stub(masm->isolate(), arg_count());
+ ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
void CallICStub::Generate(MacroAssembler* masm) {
+ // r3 - number of arguments
// r4 - function
// r6 - slot id (Smi)
// r5 - vector
Label extra_checks_or_miss, call, call_function, call_count_incremented;
- int argc = arg_count();
- ParameterCount actual(argc);
// The checks. First, does r4 match the recorded monomorphic target?
__ SmiToPtrArrayOffset(r9, r6);
@@ -1933,7 +1930,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Increment the call count for monomorphic function calls.
IncrementCallCount(masm, r5, r6, r0);
- __ mov(r3, Operand(argc));
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -1977,7 +1973,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
IncrementCallCount(masm, r5, r6, r0);
__ bind(&call_count_incremented);
- __ mov(r3, Operand(argc));
__ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -2010,13 +2005,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
- __ Push(r5);
- __ Push(r6);
- __ Push(cp, r4);
+ __ SmiTag(r3);
+ __ Push(r3, r5, r6, cp, r4);
__ CallStub(&create_stub);
- __ Pop(cp, r4);
- __ Pop(r6);
- __ Pop(r5);
+ __ Pop(r5, r6, cp, r4);
+ __ Pop(r3);
+ __ SmiUntag(r3);
}
__ b(&call_function);
@@ -2033,14 +2027,21 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push the function and feedback info.
- __ Push(r4, r5, r6);
+ // Preserve the number of arguments as Smi.
+ __ SmiTag(r3);
+
+ // Push the receiver and the function and feedback info.
+ __ Push(r3, r4, r5, r6);
// Call the entry.
__ CallRuntime(Runtime::kCallIC_Miss);
// Move result to r4 and exit the internal frame.
__ mr(r4, r3);
+
+ // Restore number of arguments.
+ __ Pop(r3);
+ __ SmiUntag(r3);
}
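Note: the miss handler now threads the argument count through the runtime call. Pushing a raw machine integer would confuse the GC, which scans the stack for tagged values, so the count is Smi-tagged before the push and untagged after the pop. A sketch of the round trip, assuming the 64-bit layout where the Smi payload sits in the upper 32 bits:

  #include <cassert>
  #include <cstdint>

  intptr_t SmiTag(intptr_t value) { return value << 32; }  // raw int -> Smi
  intptr_t SmiUntag(intptr_t smi) { return smi >> 32; }    // Smi -> raw int

  int main() {
    intptr_t argc = 3;
    // What __ SmiTag(r3) / __ Push ... __ Pop / __ SmiUntag(r3) preserve:
    assert(SmiUntag(SmiTag(argc)) == argc);
  }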
@@ -3195,21 +3196,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-
-void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- LoadICStub stub(isolate());
- stub.GenerateForTrampoline(masm);
-}
-
-
-void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- KeyedLoadICStub stub(isolate());
- stub.GenerateForTrampoline(masm);
-}
-
-
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(r5);
CallICStub stub(isolate(), state());
@@ -3217,14 +3203,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
}
-void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-
-void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
static void HandleArrayCases(MacroAssembler* masm, Register feedback,
Register receiver_map, Register scratch1,
Register scratch2, bool is_polymorphic,
@@ -3318,184 +3296,12 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
__ Jump(ip);
}
-
-void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r4
- Register name = LoadWithVectorDescriptor::NameRegister(); // r5
- Register vector = LoadWithVectorDescriptor::VectorRegister(); // r6
- Register slot = LoadWithVectorDescriptor::SlotRegister(); // r3
- Register feedback = r7;
- Register receiver_map = r8;
- Register scratch1 = r9;
-
- __ SmiToPtrArrayOffset(r0, slot);
- __ add(feedback, vector, r0);
- __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- __ bne(&not_array);
- HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, true, &miss);
-
- __ bind(&not_array);
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ bne(&miss);
- masm->isolate()->load_stub_cache()->GenerateProbe(
- masm, receiver, name, feedback, receiver_map, scratch1, r10);
-
- __ bind(&miss);
- LoadIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ b(&compare_map);
-}
-
-
-void KeyedLoadICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-
-void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r4
- Register key = LoadWithVectorDescriptor::NameRegister(); // r5
- Register vector = LoadWithVectorDescriptor::VectorRegister(); // r6
- Register slot = LoadWithVectorDescriptor::SlotRegister(); // r3
- Register feedback = r7;
- Register receiver_map = r8;
- Register scratch1 = r9;
-
- __ SmiToPtrArrayOffset(r0, slot);
- __ add(feedback, vector, r0);
- __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- __ bind(&try_array);
- // Is it a fixed array?
- __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- __ bne(&not_array);
-
- // We have a polymorphic element handler.
- Label polymorphic, try_poly_name;
- __ bind(&polymorphic);
- HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, true, &miss);
-
- __ bind(&not_array);
- // Is it generic?
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ bne(&try_poly_name);
- Handle<Code> megamorphic_stub =
- KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ cmp(key, feedback);
- __ bne(&miss);
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ SmiToPtrArrayOffset(r0, slot);
- __ add(feedback, vector, r0);
- __ LoadP(feedback,
- FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, false, &miss);
-
- __ bind(&miss);
- KeyedLoadIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ b(&compare_map);
-}
-
-void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- StoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // r4
- Register key = StoreWithVectorDescriptor::NameRegister(); // r5
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // r6
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // r7
- DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r3)); // r3
- Register feedback = r8;
- Register receiver_map = r9;
- Register scratch1 = r10;
-
- __ SmiToPtrArrayOffset(r0, slot);
- __ add(feedback, vector, r0);
- __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- __ bne(&not_array);
-
- Register scratch2 = r11;
- HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
- &miss);
-
- __ bind(&not_array);
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ bne(&miss);
- masm->isolate()->store_stub_cache()->GenerateProbe(
- masm, receiver, key, feedback, receiver_map, scratch1, scratch2);
-
- __ bind(&miss);
- StoreIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ b(&compare_map);
-}
-
void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
@@ -3862,30 +3668,19 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm, AllocationSiteOverrideMode mode) {
- if (argument_count() == ANY) {
- Label not_zero_case, not_one_case;
- __ cmpi(r3, Operand::Zero());
- __ bne(&not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ Label not_zero_case, not_one_case;
+ __ cmpi(r3, Operand::Zero());
+ __ bne(&not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- __ bind(&not_zero_case);
- __ cmpi(r3, Operand(1));
- __ bgt(&not_one_case);
- CreateArrayDispatchOneArgument(masm, mode);
+ __ bind(&not_zero_case);
+ __ cmpi(r3, Operand(1));
+ __ bgt(&not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else if (argument_count() == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count() == ONE) {
- CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count() == MORE_THAN_ONE) {
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else {
- UNREACHABLE();
- }
+ __ bind(&not_one_case);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
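Note: with the static argument_count() mode gone, the stub always dispatches on the live count in r3. The resulting control flow, rendered as a C++ sketch (hypothetical helper, not V8 code):

  void DispatchToArrayStub(int argc) {
    if (argc == 0) {
      // CreateArrayDispatch<ArrayNoArgumentConstructorStub>
    } else if (argc == 1) {
      // CreateArrayDispatchOneArgument (may inspect the lone argument)
    } else {
      // ArrayNArgumentsConstructorStub handles everything else
    }
  }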
@@ -3937,23 +3732,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
__ bind(&subclassing);
- switch (argument_count()) {
- case ANY:
- case MORE_THAN_ONE:
- __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
- __ StorePX(r4, MemOperand(sp, r0));
- __ addi(r3, r3, Operand(3));
- break;
- case NONE:
- __ StoreP(r4, MemOperand(sp, 0 * kPointerSize));
- __ li(r3, Operand(3));
- break;
- case ONE:
- __ StoreP(r4, MemOperand(sp, 1 * kPointerSize));
- __ li(r3, Operand(4));
- break;
- }
-
+ __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
+ __ StorePX(r4, MemOperand(sp, r0));
+ __ addi(r3, r3, Operand(3));
__ Push(r6, r5);
__ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
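Note: the subclassing path likewise drops the per-mode switch: the receiver slot is located from the dynamic count, and three extra values accompany the call into Runtime::kNewArray. A hypothetical rendering (not V8 code; assumes r6/r5 carry new.target and the AllocationSite, as in V8's other ports):

  #include <cstdint>
  #include <vector>

  int PrepareRuntimeNewArray(std::vector<uintptr_t>& stack, int argc,
                             uintptr_t function, uintptr_t new_target,
                             uintptr_t site) {
    // Assumes `stack` already holds the argc arguments plus the receiver slot.
    stack[argc] = function;       // StorePX: constructor overwrites the receiver slot
    stack.push_back(new_target);  // mirrors __ Push(r6, r5)
    stack.push_back(site);
    return argc + 3;              // the runtime sees three extra tagged values
  }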
@@ -4385,7 +4166,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
// If there are no mapped parameters, we do not need the parameter_map.
- __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
+ __ CmpSmiLiteral(r9, Smi::kZero, r0);
if (CpuFeatures::IsSupported(ISELECT)) {
__ SmiToPtrArrayOffset(r11, r9);
__ addi(r11, r11, Operand(kParameterMapHeaderSize));
@@ -4467,7 +4248,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// r9 = mapped parameter count (tagged)
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
- __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
+ __ CmpSmiLiteral(r9, Smi::kZero, r0);
if (CpuFeatures::IsSupported(ISELECT)) {
__ isel(eq, r4, r7, r4);
__ beq(&skip_parameter_map);
@@ -4690,134 +4471,6 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewStrictArguments);
}
-void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
- Register value = r3;
- Register slot = r5;
-
- Register cell = r4;
- Register cell_details = r6;
- Register cell_value = r7;
- Register cell_value_map = r8;
- Register scratch = r9;
-
- Register context = cp;
- Register context_temp = cell;
-
- Label fast_heapobject_case, fast_smi_case, slow_case;
-
- if (FLAG_debug_code) {
- __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
- __ Check(ne, kUnexpectedValue);
- }
-
- // Go up the context chain to the script context.
- for (int i = 0; i < depth(); i++) {
- __ LoadP(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
- context = context_temp;
- }
-
- // Load the PropertyCell at the specified slot.
- __ ShiftLeftImm(r0, slot, Operand(kPointerSizeLog2));
- __ add(cell, context, r0);
- __ LoadP(cell, ContextMemOperand(cell));
-
- // Load PropertyDetails for the cell (actually only the cell_type and kind).
- __ LoadP(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
- __ SmiUntag(cell_details);
- __ andi(cell_details, cell_details,
- Operand(PropertyDetails::PropertyCellTypeField::kMask |
- PropertyDetails::KindField::kMask |
- PropertyDetails::kAttributesReadOnlyMask));
-
- // Check if PropertyCell holds mutable data.
- Label not_mutable_data;
- __ cmpi(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kMutable) |
- PropertyDetails::KindField::encode(kData)));
- __ bne(&not_mutable_data);
- __ JumpIfSmi(value, &fast_smi_case);
-
- __ bind(&fast_heapobject_case);
- __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
- // RecordWriteField clobbers the value register, so we copy it before the
- // call.
- __ mr(r6, value);
- __ RecordWriteField(cell, PropertyCell::kValueOffset, r6, scratch,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Ret();
-
- __ bind(&not_mutable_data);
- // Check if PropertyCell value matches the new value (relevant for Constant,
- // ConstantType and Undefined cells).
- Label not_same_value;
- __ LoadP(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
- __ cmp(cell_value, value);
- __ bne(&not_same_value);
-
- // Make sure the PropertyCell is not marked READ_ONLY.
- __ andi(r0, cell_details, Operand(PropertyDetails::kAttributesReadOnlyMask));
- __ bne(&slow_case, cr0);
-
- if (FLAG_debug_code) {
- Label done;
- // This can only be true for Constant, ConstantType and Undefined cells,
- // because we never store the_hole via this stub.
- __ cmpi(cell_details,
- Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstant) |
- PropertyDetails::KindField::encode(kData)));
- __ beq(&done);
- __ cmpi(cell_details,
- Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstantType) |
- PropertyDetails::KindField::encode(kData)));
- __ beq(&done);
- __ cmpi(cell_details,
- Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kUndefined) |
- PropertyDetails::KindField::encode(kData)));
- __ Check(eq, kUnexpectedValue);
- __ bind(&done);
- }
- __ Ret();
- __ bind(&not_same_value);
-
- // Check if PropertyCell contains data with constant type (and is not
- // READ_ONLY).
- __ cmpi(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstantType) |
- PropertyDetails::KindField::encode(kData)));
- __ bne(&slow_case);
-
- // Now either both old and new values must be smis or both must be heap
- // objects with same map.
- Label value_is_heap_object;
- __ JumpIfNotSmi(value, &value_is_heap_object);
- __ JumpIfNotSmi(cell_value, &slow_case);
- // Old and new values are smis, no need for a write barrier here.
- __ bind(&fast_smi_case);
- __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
- __ Ret();
-
- __ bind(&value_is_heap_object);
- __ JumpIfSmi(cell_value, &slow_case);
-
- __ LoadP(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
- __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- __ cmp(cell_value_map, scratch);
- __ beq(&fast_heapobject_case);
-
- // Fallback to runtime.
- __ bind(&slow_case);
- __ SmiTag(slot);
- __ Push(slot, value);
- __ TailCallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy);
-}
-
-
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
@@ -5113,7 +4766,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ Push(scratch, scratch);
__ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
__ Push(scratch, holder);
- __ Push(Smi::FromInt(0)); // should_throw_on_error -> false
+ __ Push(Smi::kZero); // should_throw_on_error -> false
__ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
__ push(scratch);
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 3ff0fde047..74ad56405f 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -29,9 +29,9 @@ const Register LoadDescriptor::ReceiverRegister() { return r4; }
const Register LoadDescriptor::NameRegister() { return r5; }
const Register LoadDescriptor::SlotRegister() { return r3; }
-
const Register LoadWithVectorDescriptor::VectorRegister() { return r6; }
+const Register LoadICProtoArrayDescriptor::HandlerRegister() { return r7; }
const Register StoreDescriptor::ReceiverRegister() { return r4; }
const Register StoreDescriptor::NameRegister() { return r5; }
@@ -44,10 +44,6 @@ const Register StoreTransitionDescriptor::SlotRegister() { return r7; }
const Register StoreTransitionDescriptor::VectorRegister() { return r6; }
const Register StoreTransitionDescriptor::MapRegister() { return r8; }
-const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r5; }
-const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r3; }
-
-
const Register StringCompareDescriptor::LeftRegister() { return r4; }
const Register StringCompareDescriptor::RightRegister() { return r3; }
@@ -157,7 +153,7 @@ void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {r4, r6, r5};
+ Register registers[] = {r4, r3, r6, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
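Note: the extra r3 entry carries the argument count now threaded through CallICStub (see the register comments earlier in this diff). An annotated sketch of what each slot holds:

  // Parameter order for CallFunctionWithFeedbackAndVector (sketch):
  Register registers[] = {
      r4,  // function being called
      r3,  // number of arguments, newly passed explicitly
      r6,  // feedback slot id
      r5,  // type feedback vector
  };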
@@ -206,13 +202,6 @@ void ConstructTrampolineDescriptor::InitializePlatformSpecific(
}
-void RegExpConstructResultDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r5, r4, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r4};
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 9b5f80ebe9..6588540035 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -1605,90 +1605,6 @@ void MacroAssembler::PopStackHandler() {
}
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch, Label* miss) {
- Label same_contexts;
-
- DCHECK(!holder_reg.is(scratch));
- DCHECK(!holder_reg.is(ip));
- DCHECK(!scratch.is(ip));
-
- // Load current lexical context from the active StandardFrame, which
- // may require crawling past STUB frames.
- Label load_context;
- Label has_context;
- DCHECK(!ip.is(scratch));
- mr(ip, fp);
- bind(&load_context);
- LoadP(scratch,
- MemOperand(ip, CommonFrameConstants::kContextOrFrameTypeOffset));
- JumpIfNotSmi(scratch, &has_context);
- LoadP(ip, MemOperand(ip, CommonFrameConstants::kCallerFPOffset));
- b(&load_context);
- bind(&has_context);
-
-// In debug mode, make sure the lexical context is set.
-#ifdef DEBUG
- cmpi(scratch, Operand::Zero());
- Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
-#endif
-
- // Load the native context of the current context.
- LoadP(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // Cannot use ip as a temporary in this verification code. Due to the fact
- // that ip is clobbered as part of cmp with an object Operand.
- push(holder_reg); // Temporarily save holder on the stack.
- // Read the first word and compare to the native_context_map.
- LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kNativeContextMapRootIndex);
- cmp(holder_reg, ip);
- Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
- pop(holder_reg); // Restore holder.
- }
-
- // Check if both contexts are the same.
- LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- cmp(scratch, ip);
- beq(&same_contexts);
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // Cannot use ip as a temporary in this verification code. Due to the fact
- // that ip is clobbered as part of cmp with an object Operand.
- push(holder_reg); // Temporarily save holder on the stack.
- mr(holder_reg, ip); // Move ip to its holding place.
- LoadRoot(ip, Heap::kNullValueRootIndex);
- cmp(holder_reg, ip);
- Check(ne, kJSGlobalProxyContextShouldNotBeNull);
-
- LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kNativeContextMapRootIndex);
- cmp(holder_reg, ip);
- Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
- // Restore ip is not needed. ip is reloaded below.
- pop(holder_reg); // Restore holder.
- // Restore ip to holder's context.
- LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- }
-
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
- int token_offset =
- Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
-
- LoadP(scratch, FieldMemOperand(scratch, token_offset));
- LoadP(ip, FieldMemOperand(ip, token_offset));
- cmp(scratch, ip);
- bne(miss);
-
- bind(&same_contexts);
-}
-
-
// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
@@ -1729,86 +1645,6 @@ void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
ExtractBitRange(t0, t0, 29, 0);
}
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
- Register key, Register result,
- Register t0, Register t1,
- Register t2) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'result'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
- //
- // Scratch registers:
- //
- // t0 - holds the untagged key on entry and holds the hash once computed.
- //
- // t1 - used to hold the capacity mask of the dictionary
- //
- // t2 - used for the index into the dictionary.
- Label done;
-
- GetNumberHash(t0, t1);
-
- // Compute the capacity mask.
- LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
- SmiUntag(t1);
- subi(t1, t1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before giving up.
- for (int i = 0; i < kNumberDictionaryProbes; i++) {
- // Use t2 for index calculations and keep the hash intact in t0.
- mr(t2, t0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- addi(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
- }
- and_(t2, t2, t1);
-
- // Scale the index by multiplying by the element size.
- DCHECK(SeededNumberDictionary::kEntrySize == 3);
- slwi(ip, t2, Operand(1));
- add(t2, t2, ip); // t2 = t2 * 3
-
- // Check if the key is identical to the name.
- slwi(t2, t2, Operand(kPointerSizeLog2));
- add(t2, elements, t2);
- LoadP(ip,
- FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
- cmp(key, ip);
- if (i != kNumberDictionaryProbes - 1) {
- beq(&done);
- } else {
- bne(miss);
- }
- }
-
- bind(&done);
- // Check that the value is a field property.
- // t2: elements + (index * kPointerSize)
- const int kDetailsOffset =
- SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- LoadP(t1, FieldMemOperand(t2, kDetailsOffset));
- LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask));
- DCHECK_EQ(DATA, 0);
- and_(r0, t1, ip, SetRC);
- bne(miss, cr0);
-
- // Get the value at the masked, scaled index and return.
- const int kValueOffset =
- SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- LoadP(result, FieldMemOperand(t2, kValueOffset));
-}
-
-
void MacroAssembler::Allocate(int object_size, Register result,
Register scratch1, Register scratch2,
Label* gc_required, AllocationFlags flags) {
@@ -2234,20 +2070,6 @@ void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
cmp(obj, r0);
}
-
-void MacroAssembler::CheckFastElements(Register map, Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000);
- cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
- bgt(fail);
-}
-
-
void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
@@ -2525,18 +2347,6 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
- // If the hash field contains an array index pick it out. The assert checks
- // that the constants for the maximum number of digits for an array index
- // cached in the hash field and the number of bits reserved for it does not
- // conflict.
- DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
-}
-
-
void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
SmiUntag(ip, smi);
ConvertIntToDouble(ip, value);
@@ -3282,73 +3092,6 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
-
-void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
- Register scratch) {
- Label align_loop, aligned, word_loop, byte_loop, byte_loop_1, done;
-
- DCHECK(!scratch.is(r0));
-
- cmpi(length, Operand::Zero());
- beq(&done);
-
- // Check src alignment and length to see whether word_loop is possible
- andi(scratch, src, Operand(kPointerSize - 1));
- beq(&aligned, cr0);
- subfic(scratch, scratch, Operand(kPointerSize * 2));
- cmp(length, scratch);
- blt(&byte_loop);
-
- // Align src before copying in word size chunks.
- subi(scratch, scratch, Operand(kPointerSize));
- mtctr(scratch);
- bind(&align_loop);
- lbz(scratch, MemOperand(src));
- addi(src, src, Operand(1));
- subi(length, length, Operand(1));
- stb(scratch, MemOperand(dst));
- addi(dst, dst, Operand(1));
- bdnz(&align_loop);
-
- bind(&aligned);
-
- // Copy bytes in word size chunks.
- if (emit_debug_code()) {
- andi(r0, src, Operand(kPointerSize - 1));
- Assert(eq, kExpectingAlignmentForCopyBytes, cr0);
- }
-
- ShiftRightImm(scratch, length, Operand(kPointerSizeLog2));
- cmpi(scratch, Operand::Zero());
- beq(&byte_loop);
-
- mtctr(scratch);
- bind(&word_loop);
- LoadP(scratch, MemOperand(src));
- addi(src, src, Operand(kPointerSize));
- subi(length, length, Operand(kPointerSize));
-
- StoreP(scratch, MemOperand(dst));
- addi(dst, dst, Operand(kPointerSize));
- bdnz(&word_loop);
-
- // Copy the last bytes if any left.
- cmpi(length, Operand::Zero());
- beq(&done);
-
- bind(&byte_loop);
- mtctr(length);
- bind(&byte_loop_1);
- lbz(scratch, MemOperand(src));
- addi(src, src, Operand(1));
- stb(scratch, MemOperand(dst));
- addi(dst, dst, Operand(1));
- bdnz(&byte_loop_1);
-
- bind(&done);
-}
-
-
void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
Register count,
Register filler) {
@@ -3451,7 +3194,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
cmp(index, ip);
Check(lt, kIndexIsTooLarge);
- DCHECK(Smi::FromInt(0) == 0);
+ DCHECK(Smi::kZero == 0);
cmpi(index, Operand::Zero());
Check(ge, kIndexIsNegative);
@@ -3828,7 +3571,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
// For all objects but the receiver, check that the cache is empty.
EnumLength(r6, r4);
- CmpSmiLiteral(r6, Smi::FromInt(0), r0);
+ CmpSmiLiteral(r6, Smi::kZero, r0);
bne(call_runtime);
bind(&start);
@@ -4687,7 +4430,8 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
ExternalReference new_space_allocation_top_adr =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
- const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+ const int kMementoLastWordOffset =
+ kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
Register mask = scratch2_reg;
DCHECK(!AreAliased(receiver_reg, scratch_reg, mask));
@@ -4697,7 +4441,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
lis(mask, Operand((~Page::kPageAlignmentMask >> 16)));
- addi(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ addi(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
@@ -4718,7 +4462,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
// we are below top.
bind(&top_check);
cmp(scratch_reg, ip);
- bgt(no_memento_found);
+ bge(no_memento_found);
// Memento map check.
bind(&map_check);
LoadP(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
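Note: two coordinated fixes in this hunk: the probe address now points at the memento's last word rather than one past its end, and the top comparison flips from bgt to bge to match. The last-word address has the useful property that it can never spill onto the following page when an object ends exactly on a page boundary, which keeps the page-mask comparison above it sound. The equivalent arithmetic, as a sketch (kPointerSize == 8 assumed):

  #include <cstdint>

  constexpr intptr_t kPointerSize = 8;

  // Memento spans [base, base + size). "Fully allocated below top" is
  //   base + size <= top                    (one-past-the-end form)
  //   <=> base + size - kPointerSize < top  (last-word form after this patch)
  bool MementoBelowTop(intptr_t base, intptr_t size, intptr_t top) {
    intptr_t last_word = base + size - kPointerSize;
    return last_word < top;  // mirrors cmp(scratch_reg, ip); bge(no_memento_found)
  }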
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index ba4d277688..28eceb18a4 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -662,19 +662,8 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// Inline caching support
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, whereas both scratch registers are clobbered.
- void CheckAccessGlobalProxy(Register holder_reg, Register scratch,
- Label* miss);
-
void GetNumberHash(Register t0, Register scratch);
- void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
- Register result, Register t0, Register t1,
- Register t2);
-
-
inline void MarkCode(NopMarkerTypes type) { nop(type); }
// Check if the given instruction is a 'type' marker.
@@ -769,11 +758,6 @@ class MacroAssembler : public Assembler {
Register scratch1, Register scratch2,
Label* gc_required);
- // Copies a number of bytes from src to dst. All registers are clobbered. On
- // exit src and dst will point to the place just after where the last byte was
- // read or written and length will be zero.
- void CopyBytes(Register src, Register dst, Register length, Register scratch);
-
// Initialize fields with filler values. |count| fields starting at
// |current_address| are overwritten with the value in |filler|. At the end
// the loop, |current_address| points at the next uninitialized field.
@@ -819,11 +803,6 @@ class MacroAssembler : public Assembler {
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map, Register type_reg, InstanceType type);
-
- // Check if a map for a JSObject indicates that the object has fast elements.
- // Jump to the specified label if it does not.
- void CheckFastElements(Register map, Register scratch, Label* fail);
-
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map, Register scratch, Label* fail);
@@ -912,13 +891,6 @@ class MacroAssembler : public Assembler {
return eq;
}
-
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
// Get the number of least significant bits from a register
void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);