Diffstat (limited to 'deps/v8/src/ia32/codegen-ia32.cc')
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 203
1 file changed, 201 insertions(+), 2 deletions(-)
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index eb6868729b..9477bf149a 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -102,6 +102,43 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
}
+UnaryMathFunction CreateExpFunction() {
+ if (!CpuFeatures::IsSupported(SSE2)) return &exp;
+ if (!FLAG_fast_math) return &exp;
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &exp;
+ ExternalReference::InitializeMathExpData();
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ // esp[1 * kPointerSize]: raw double input
+ // esp[0 * kPointerSize]: return address
+ {
+ CpuFeatures::Scope use_sse2(SSE2);
+ XMMRegister input = xmm1;
+ XMMRegister result = xmm2;
+ __ movdbl(input, Operand(esp, 1 * kPointerSize));
+ __ push(eax);
+ __ push(ebx);
+
+ MathExpGenerator::EmitMathExp(&masm, input, result, xmm0, eax, ebx);
+
+ __ pop(ebx);
+ __ pop(eax);
+ __ movdbl(Operand(esp, 1 * kPointerSize), result);
+ __ fld_d(Operand(esp, 1 * kPointerSize));
+ __ Ret();
+ }
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+}
+
+
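The stub built by CreateExpFunction() above follows the ia32 cdecl convention for a double(double) function: the raw double argument is read from the caller's stack slot and the result must come back in st(0), which is why the SSE2 result is spilled back to the stack and reloaded with fld_d before the Ret(). A minimal caller-side sketch, assuming only the UnaryMathFunction typedef from the ia32 codegen header; the wrapper and the cached pointer are illustrative, not part of this patch:

    // Whether CreateExpFunction() returned the generated SSE2 stub or the
    // C library exp() fallback, the pointer has the same signature.
    typedef double (*UnaryMathFunction)(double x);

    static UnaryMathFunction fast_exp_function = NULL;  // illustrative cache

    double FastExp(double x) {
      if (fast_exp_function == NULL) fast_exp_function = CreateExpFunction();
      return fast_exp_function(x);
    }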
UnaryMathFunction CreateSqrtFunction() {
size_t actual_size;
// Allocate buffer in executable space.
@@ -732,7 +769,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Dispatch on the encoding: ASCII or two-byte.
Label ascii;
__ bind(&seq_string);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ test(result, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii, Label::kNear);
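This hunk only tracks the rename of kAsciiStringTag to kOneByteStringTag; the dispatch itself is unchanged: the encoding bit of the instance type is non-zero for one-byte strings and zero for two-byte strings, which is exactly what the two STATIC_ASSERTs pin down. A scalar sketch of the emitted test (IsOneByteEncoding is an illustrative helper, not V8 API):

    // Mirrors test(result, kStringEncodingMask) / j(not_zero, &ascii):
    // a set encoding bit selects the one-byte load path.
    static bool IsOneByteEncoding(uint32_t instance_type) {
      return (instance_type & kStringEncodingMask) != 0;
    }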
@@ -751,12 +788,174 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ movzx_b(result, FieldOperand(string,
index,
times_1,
- SeqAsciiString::kHeaderSize));
+ SeqOneByteString::kHeaderSize));
+ __ bind(&done);
+}
+
+
+void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
+ String::Encoding encoding,
+ Register string,
+ Register index,
+ Register value) {
+ if (FLAG_debug_code) {
+ __ test(index, Immediate(kSmiTagMask));
+ __ Check(zero, "Non-smi index");
+ __ test(value, Immediate(kSmiTagMask));
+ __ Check(zero, "Non-smi value");
+
+ __ cmp(index, FieldOperand(string, String::kLengthOffset));
+ __ Check(less, "Index is too large");
+
+ __ cmp(index, Immediate(Smi::FromInt(0)));
+ __ Check(greater_equal, "Index is negative");
+
+ __ push(value);
+ __ mov(value, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ __ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ cmp(value, Immediate(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(equal, "Unexpected string type");
+ __ pop(value);
+ }
+
+ __ SmiUntag(value);
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ SmiUntag(index);
+ __ mov_b(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
+ value);
+ } else {
+ // No need to untag a smi for two-byte addressing.
+ __ mov_w(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
+ value);
+ }
+}
+
+
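The notable detail in SeqStringSetCharGenerator::Generate above is the two-byte store: on ia32 a smi is the integer value shifted left by one bit, so the still-tagged index is already the byte offset of a two-byte element and only the one-byte path needs a second SmiUntag. A hedged scalar equivalent, where chars stands for the character payload addressed via SeqString::kHeaderSize and the function name is illustrative:

    #include <stdint.h>

    // Scalar sketch of the generated stores, assuming ia32 smi tagging
    // (kSmiTagSize == 1, kSmiTag == 0: a smi is the value shifted left by 1).
    static void SeqStringSetCharSketch(bool one_byte_encoding, uint8_t* chars,
                                       int32_t tagged_index,
                                       int32_t tagged_value) {
      int32_t value = tagged_value >> 1;          // SmiUntag(value)
      if (one_byte_encoding) {
        int32_t index = tagged_index >> 1;        // SmiUntag(index)
        chars[index] = static_cast<uint8_t>(value);                  // mov_b
      } else {
        // tagged_index == index * 2, which is already the byte offset of a
        // two-byte element, so no untagging is needed.
        *reinterpret_cast<uint16_t*>(chars + tagged_index) =
            static_cast<uint16_t>(value);                            // mov_w
      }
    }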
+static Operand ExpConstant(int index) {
+ return Operand::StaticVariable(ExternalReference::math_exp_constants(index));
+}
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
+ XMMRegister input,
+ XMMRegister result,
+ XMMRegister double_scratch,
+ Register temp1,
+ Register temp2) {
+ ASSERT(!input.is(double_scratch));
+ ASSERT(!input.is(result));
+ ASSERT(!result.is(double_scratch));
+ ASSERT(!temp1.is(temp2));
+ ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+
+ Label done;
+
+ __ movdbl(double_scratch, ExpConstant(0));
+ __ xorpd(result, result);
+ __ ucomisd(double_scratch, input);
+ __ j(above_equal, &done);
+ __ ucomisd(input, ExpConstant(1));
+ __ movdbl(result, ExpConstant(2));
+ __ j(above_equal, &done);
+ __ movdbl(double_scratch, ExpConstant(3));
+ __ movdbl(result, ExpConstant(4));
+ __ mulsd(double_scratch, input);
+ __ addsd(double_scratch, result);
+ __ movd(temp2, double_scratch);
+ __ subsd(double_scratch, result);
+ __ movdbl(result, ExpConstant(6));
+ __ mulsd(double_scratch, ExpConstant(5));
+ __ subsd(double_scratch, input);
+ __ subsd(result, double_scratch);
+ __ movsd(input, double_scratch);
+ __ mulsd(input, double_scratch);
+ __ mulsd(result, input);
+ __ mov(temp1, temp2);
+ __ mulsd(result, ExpConstant(7));
+ __ subsd(result, double_scratch);
+ __ add(temp1, Immediate(0x1ff800));
+ __ addsd(result, ExpConstant(8));
+ __ and_(temp2, Immediate(0x7ff));
+ __ shr(temp1, 11);
+ __ shl(temp1, 20);
+ __ movd(input, temp1);
+ __ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01
+ __ movdbl(double_scratch, Operand::StaticArray(
+ temp2, times_8, ExternalReference::math_exp_log_table()));
+ __ por(input, double_scratch);
+ __ mulsd(result, input);
__ bind(&done);
}
#undef __
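EmitMathExp above is a table-driven approximation. After clamping against ExpConstant(0) and ExpConstant(1) (inputs whose exponential underflows to 0 or overflows to +Infinity), the input is scaled and biased so that its integer part lands in the low dword of the double: the low 11 bits of that dword index the 2048-entry math_exp_log_table(), the remaining bits become the exponent field of the final scale factor, and a short polynomial in the residual supplies the factor that the scale is multiplied by. A hedged scalar transcription of the same sequence, with C[i] standing for ExpConstant(i); the concrete constants are defined by InitializeMathExpData() and are not repeated here, and little-endian layout (as on ia32) is assumed:

    #include <stdint.h>
    #include <string.h>

    static double MathExpSketch(double x, const double* C,
                                const double* log_table) {
      if (C[0] >= x) return 0.0;            // too small: result underflows to 0
      if (x >= C[1]) return C[2];           // too large: C[2] is +Infinity
      double scratch = C[3] * x + C[4];     // scale and bias the input
      uint32_t lo;
      memcpy(&lo, &scratch, sizeof(lo));    // movd temp2: integer part in low dword
      scratch = (scratch - C[4]) * C[5] - x;  // residual fed to the polynomial
      double poly =
          ((C[6] - scratch) * scratch * scratch) * C[7] - scratch + C[8];
      // Rebuild the scale factor: exponent bits from the integer part,
      // mantissa bits from the table entry selected by the low 11 bits.
      uint64_t mantissa;
      memcpy(&mantissa, &log_table[lo & 0x7ff], sizeof(mantissa));
      uint64_t bits =
          (static_cast<uint64_t>(((lo + 0x1ff800u) >> 11) << 20) << 32) | mantissa;
      double scale;
      memcpy(&scale, &bits, sizeof(scale));
      return poly * scale;                  // final mulsd(result, input)
    }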
+static const int kNoCodeAgeSequenceLength = 5;
+
+static byte* GetNoCodeAgeSequence(uint32_t* length) {
+ static bool initialized = false;
+ static byte sequence[kNoCodeAgeSequenceLength];
+ *length = kNoCodeAgeSequenceLength;
+ if (!initialized) {
+ // The sequence of instructions that is patched out for aging code is the
+ // following boilerplate stack-building prologue that is found both in
+ // FUNCTION and OPTIMIZED_FUNCTION code:
+ CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
+ patcher.masm()->push(ebp);
+ patcher.masm()->mov(ebp, esp);
+ patcher.masm()->push(esi);
+ patcher.masm()->push(edi);
+ initialized = true;
+ }
+ return sequence;
+}
+
+
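kNoCodeAgeSequenceLength is 5 because that is the size of the ia32 prologue captured above, and 5 bytes is also exactly the length of a call rel32 instruction, so the young and the aged form of a function's entry occupy the same space:

    // Size accounting for the young sequence (ia32 encodings):
    //   push ebp       1 byte
    //   mov ebp, esp   2 bytes
    //   push esi       1 byte
    //   push edi       1 byte
    // Total: 5 bytes == kNoCodeAgeSequenceLength, the same length as the
    // "call rel32" (1 opcode byte + 4 displacement bytes) written when aging.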
+bool Code::IsYoungSequence(byte* sequence) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ bool result = (!memcmp(sequence, young_sequence, young_length));
+ ASSERT(result || *sequence == kCallOpcode);
+ return result;
+}
+
+
+void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(sequence)) {
+ *age = kNoAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ sequence++; // Skip the kCallOpcode byte
+ Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
+ Assembler::kCallTargetAddressOffset;
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+
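When the sequence is not young it is a call into one of the code-age stubs, and GetCodeAgeAndParity above recovers that stub from the relative call: an ia32 rel32 displacement is relative to the address of the instruction following the call, which is what the Assembler::kCallTargetAddressOffset term accounts for. A hedged restatement of that pointer arithmetic (DecodeCallTarget is an illustrative name, not V8 API):

    // Decode the absolute target of an ia32 "call rel32" whose opcode byte is
    // at `sequence`: the displacement is added to the address of the
    // instruction that follows the call.
    static Address DecodeCallTarget(byte* sequence) {
      byte* displacement_location = sequence + 1;   // skip the kCallOpcode byte
      int32_t displacement =
          *reinterpret_cast<int32_t*>(displacement_location);
      return displacement_location + Assembler::kCallTargetAddressOffset +
             displacement;
    }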
+void Code::PatchPlatformCodeAge(byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ if (age == kNoAge) {
+ memcpy(sequence, young_sequence, young_length);
+ CPU::FlushICache(sequence, young_length);
+ } else {
+ Code* stub = GetCodeAgeStub(age, parity);
+ CodePatcher patcher(sequence, young_length);
+ patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE);
+ }
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32