Diffstat (limited to 'deps/v8/src/compiler/pipeline.cc')
-rw-r--r--  deps/v8/src/compiler/pipeline.cc  307
1 file changed, 224 insertions(+), 83 deletions(-)
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index e771cef123..eb060b71e1 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -16,6 +16,7 @@
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/register-configuration.h"
+#include "src/compiler/add-type-assertions-reducer.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/frame-elider.h"
#include "src/compiler/backend/instruction-selector.h"
@@ -34,6 +35,7 @@
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/constant-folding-reducer.h"
#include "src/compiler/control-flow-optimizer.h"
+#include "src/compiler/csa-load-elimination.h"
#include "src/compiler/dead-code-elimination.h"
#include "src/compiler/decompression-elimination.h"
#include "src/compiler/effect-control-linearizer.h"
@@ -114,7 +116,8 @@ class PipelineData {
instruction_zone_(instruction_zone_scope_.zone()),
codegen_zone_scope_(zone_stats_, ZONE_NAME),
codegen_zone_(codegen_zone_scope_.zone()),
- broker_(new JSHeapBroker(isolate_, info_->zone())),
+ broker_(new JSHeapBroker(isolate_, info_->zone(),
+ info_->trace_heap_broker_enabled())),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
assembler_options_(AssemblerOptions::Default(isolate)) {
@@ -266,7 +269,7 @@ class PipelineData {
JSOperatorBuilder* javascript() const { return javascript_; }
JSGraph* jsgraph() const { return jsgraph_; }
MachineGraph* mcgraph() const { return mcgraph_; }
- Handle<Context> native_context() const {
+ Handle<NativeContext> native_context() const {
return handle(info()->native_context(), isolate());
}
Handle<JSGlobalObject> global_object() const {
@@ -324,7 +327,8 @@ class PipelineData {
Typer* CreateTyper() {
DCHECK_NULL(typer_);
- typer_ = new Typer(broker(), typer_flags_, graph());
+ typer_ =
+ new Typer(broker(), typer_flags_, graph(), &info()->tick_counter());
return typer_;
}
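
A recurring change in this patch is threading a TickCounter from the OptimizedCompilationInfo into long-running compiler components: the Typer above, and further down the GraphReducer, Scheduler, EscapeAnalysis, SimplifiedLowering, LoopFinder, StoreStoreElimination, MemoryOptimizer, ControlFlowOptimizer, RegisterAllocationData and InstructionSelector all gain a TickCounter* parameter. Below is a minimal sketch of the consuming side; it is not part of the patch, SomeAnalysis/HasMoreWork/ProcessNextNode are hypothetical, and DoTick() is an assumed method name (only the constructor plumbing is visible in this diff).

    // Sketch of an assumed consumer: a long-running compiler component
    // receives the counter owned by OptimizedCompilationInfo and ticks it
    // as it makes progress.
    class SomeAnalysis {
     public:
      SomeAnalysis(Graph* graph, TickCounter* tick_counter)
          : graph_(graph), tick_counter_(tick_counter) {}

      void Run() {
        while (HasMoreWork()) {
          tick_counter_->DoTick();  // assumed API: record one unit of progress
          ProcessNextNode();
        }
      }

     private:
      bool HasMoreWork();
      void ProcessNextNode();
      Graph* graph_;
      TickCounter* tick_counter_;
    };

    // Constructed with the counter from the pipeline data, mirroring the
    // pattern used throughout this patch:
    //   SomeAnalysis analysis(data->graph(), &data->info()->tick_counter());
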
@@ -397,7 +401,8 @@ class PipelineData {
DCHECK_NULL(frame_);
int fixed_frame_size = 0;
if (call_descriptor != nullptr) {
- fixed_frame_size = call_descriptor->CalculateFixedFrameSize();
+ fixed_frame_size =
+ call_descriptor->CalculateFixedFrameSize(info()->code_kind());
}
frame_ = new (codegen_zone()) Frame(fixed_frame_size);
}
@@ -408,7 +413,8 @@ class PipelineData {
DCHECK_NULL(register_allocation_data_);
register_allocation_data_ = new (register_allocation_zone())
RegisterAllocationData(config, register_allocation_zone(), frame(),
- sequence(), flags, debug_name());
+ sequence(), flags, &info()->tick_counter(),
+ debug_name());
}
void InitializeOsrHelper() {
@@ -1040,6 +1046,119 @@ void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
code->set_can_have_weak_objects(true);
}
+class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
+ public:
+ WasmHeapStubCompilationJob(Isolate* isolate, CallDescriptor* call_descriptor,
+ std::unique_ptr<Zone> zone, Graph* graph,
+ Code::Kind kind,
+ std::unique_ptr<char[]> debug_name,
+ const AssemblerOptions& options,
+ SourcePositionTable* source_positions)
+ // Note that the OptimizedCompilationInfo is not initialized at the time
+ // we pass it to the CompilationJob constructor, but it is not
+ // dereferenced there.
+ : OptimizedCompilationJob(isolate->stack_guard()->real_climit(), &info_,
+ "TurboFan"),
+ debug_name_(std::move(debug_name)),
+ info_(CStrVector(debug_name_.get()), graph->zone(), kind),
+ call_descriptor_(call_descriptor),
+ zone_stats_(isolate->allocator()),
+ zone_(std::move(zone)),
+ graph_(graph),
+ data_(&zone_stats_, &info_, isolate, graph_, nullptr, source_positions,
+ new (zone_.get()) NodeOriginTable(graph_), nullptr, options),
+ pipeline_(&data_) {}
+
+ ~WasmHeapStubCompilationJob() = default;
+
+ protected:
+ Status PrepareJobImpl(Isolate* isolate) final;
+ Status ExecuteJobImpl() final;
+ Status FinalizeJobImpl(Isolate* isolate) final;
+
+ private:
+ std::unique_ptr<char[]> debug_name_;
+ OptimizedCompilationInfo info_;
+ CallDescriptor* call_descriptor_;
+ ZoneStats zone_stats_;
+ std::unique_ptr<Zone> zone_;
+ Graph* graph_;
+ PipelineData data_;
+ PipelineImpl pipeline_;
+
+ DISALLOW_COPY_AND_ASSIGN(WasmHeapStubCompilationJob);
+};
+
+// static
+std::unique_ptr<OptimizedCompilationJob>
+Pipeline::NewWasmHeapStubCompilationJob(Isolate* isolate,
+ CallDescriptor* call_descriptor,
+ std::unique_ptr<Zone> zone,
+ Graph* graph, Code::Kind kind,
+ std::unique_ptr<char[]> debug_name,
+ const AssemblerOptions& options,
+ SourcePositionTable* source_positions) {
+ return base::make_unique<WasmHeapStubCompilationJob>(
+ isolate, call_descriptor, std::move(zone), graph, kind,
+ std::move(debug_name), options, source_positions);
+}
+
+CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl(
+ Isolate* isolate) {
+ std::unique_ptr<PipelineStatistics> pipeline_statistics;
+ if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
+ pipeline_statistics.reset(new PipelineStatistics(
+ &info_, isolate->GetTurboStatistics(), &zone_stats_));
+ pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
+ }
+ if (info_.trace_turbo_json_enabled() || info_.trace_turbo_graph_enabled()) {
+ CodeTracer::Scope tracing_scope(data_.GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "---------------------------------------------------\n"
+ << "Begin compiling method " << info_.GetDebugName().get()
+ << " using TurboFan" << std::endl;
+ }
+ if (info_.trace_turbo_graph_enabled()) { // Simple textual RPO.
+ StdoutStream{} << "-- wasm stub " << Code::Kind2String(info_.code_kind())
+ << " graph -- " << std::endl
+ << AsRPO(*data_.graph());
+ }
+
+ if (info_.trace_turbo_json_enabled()) {
+ TurboJsonFile json_of(&info_, std::ios_base::trunc);
+ json_of << "{\"function\":\"" << info_.GetDebugName().get()
+ << "\", \"source\":\"\",\n\"phases\":[";
+ }
+ pipeline_.RunPrintAndVerify("V8.WasmMachineCode", true);
+ return CompilationJob::SUCCEEDED;
+}
+
+CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl() {
+ pipeline_.ComputeScheduledGraph();
+ if (pipeline_.SelectInstructionsAndAssemble(call_descriptor_)) {
+ return CompilationJob::SUCCEEDED;
+ }
+ return CompilationJob::FAILED;
+}
+
+CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
+ Isolate* isolate) {
+ Handle<Code> code;
+ if (pipeline_.FinalizeCode(call_descriptor_).ToHandle(&code) &&
+ pipeline_.CommitDependencies(code)) {
+ info_.SetCode(code);
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_opt_code) {
+ CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ code->Disassemble(compilation_info()->GetDebugName().get(), os);
+ }
+#endif
+ return SUCCEEDED;
+ }
+ return FAILED;
+}
+
template <typename Phase, typename... Args>
void PipelineImpl::Run(Args&&... args) {
PipelineRunScope scope(this->data_, Phase::phase_name());
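
The job added above packages the scheduling and code-generation steps that previously lived inline in Pipeline::GenerateCodeForWasmHeapStub (deleted near the end of this patch) into the standard three-stage OptimizedCompilationJob shape, presumably so the execute stage can run off the main thread. A hypothetical call site is sketched below; it is not part of the patch, it assumes the usual PrepareJob/ExecuteJob/FinalizeJob wrappers around the *Impl methods above, and Code::JS_TO_WASM_FUNCTION is only an example kind.

    // Hypothetical caller, not part of the patch.
    std::unique_ptr<OptimizedCompilationJob> job =
        Pipeline::NewWasmHeapStubCompilationJob(
            isolate, call_descriptor, std::move(zone), graph,
            Code::JS_TO_WASM_FUNCTION, std::move(debug_name),
            AssemblerOptions::Default(isolate), source_positions);
    if (job->PrepareJob(isolate) == CompilationJob::SUCCEEDED &&
        job->ExecuteJob() == CompilationJob::SUCCEEDED &&  // may run on a background thread
        job->FinalizeJob(isolate) == CompilationJob::SUCCEEDED) {
      Handle<Code> code = job->compilation_info()->code();
      // ... use the generated stub code.
    }
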
@@ -1065,7 +1184,7 @@ struct GraphBuilderPhase {
handle(data->info()->closure()->feedback_vector(), data->isolate()),
data->info()->osr_offset(), data->jsgraph(), frequency,
data->source_positions(), data->native_context(),
- SourcePosition::kNotInlined, flags);
+ SourcePosition::kNotInlined, flags, &data->info()->tick_counter());
}
};
@@ -1102,7 +1221,7 @@ struct InliningPhase {
void Run(PipelineData* data, Zone* temp_zone) {
Isolate* isolate = data->isolate();
OptimizedCompilationInfo* info = data->info();
- GraphReducer graph_reducer(temp_zone, data->graph(),
+ GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
@@ -1196,6 +1315,7 @@ struct UntyperPhase {
}
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
RemoveTypeReducer remove_type_reducer;
AddReducer(data, &graph_reducer, &remove_type_reducer);
@@ -1216,6 +1336,7 @@ struct CopyMetadataForConcurrentCompilePhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
JSHeapCopyReducer heap_copy_reducer(data->broker());
AddReducer(data, &graph_reducer, &heap_copy_reducer);
@@ -1242,13 +1363,13 @@ struct SerializationPhase {
if (data->info()->is_source_positions_enabled()) {
flags |= SerializerForBackgroundCompilationFlag::kCollectSourcePositions;
}
- if (data->info()->is_osr()) {
- flags |= SerializerForBackgroundCompilationFlag::kOsr;
+ if (data->info()->is_analyze_environment_liveness()) {
+ flags |=
+ SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness;
}
- SerializerForBackgroundCompilation serializer(
- data->broker(), data->dependencies(), temp_zone,
- data->info()->closure(), flags);
- serializer.Run();
+ RunSerializerForBackgroundCompilation(data->broker(), data->dependencies(),
+ temp_zone, data->info()->closure(),
+ flags, data->info()->osr_offset());
}
};
@@ -1257,6 +1378,7 @@ struct TypedLoweringPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
@@ -1292,9 +1414,12 @@ struct EscapeAnalysisPhase {
static const char* phase_name() { return "V8.TFEscapeAnalysis"; }
void Run(PipelineData* data, Zone* temp_zone) {
- EscapeAnalysis escape_analysis(data->jsgraph(), temp_zone);
+ EscapeAnalysis escape_analysis(data->jsgraph(),
+ &data->info()->tick_counter(), temp_zone);
escape_analysis.ReduceGraph();
- GraphReducer reducer(temp_zone, data->graph(), data->jsgraph()->Dead());
+ GraphReducer reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
+ data->jsgraph()->Dead());
EscapeAnalysisReducer escape_reducer(&reducer, data->jsgraph(),
escape_analysis.analysis_result(),
temp_zone);
@@ -1305,13 +1430,28 @@ struct EscapeAnalysisPhase {
}
};
+struct TypeAssertionsPhase {
+ static const char* phase_name() { return "V8.TFTypeAssertions"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
+ data->jsgraph()->Dead());
+ AddTypeAssertionsReducer type_assertions(&graph_reducer, data->jsgraph(),
+ temp_zone);
+ AddReducer(data, &graph_reducer, &type_assertions);
+ graph_reducer.ReduceGraph();
+ }
+};
+
struct SimplifiedLoweringPhase {
static const char* phase_name() { return "V8.TFSimplifiedLowering"; }
void Run(PipelineData* data, Zone* temp_zone) {
SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
data->source_positions(), data->node_origins(),
- data->info()->GetPoisoningMitigationLevel());
+ data->info()->GetPoisoningMitigationLevel(),
+ &data->info()->tick_counter());
lowering.LowerAllNodes();
}
};
@@ -1325,8 +1465,8 @@ struct LoopPeelingPhase {
data->jsgraph()->GetCachedNodes(&roots);
trimmer.TrimGraph(roots.begin(), roots.end());
- LoopTree* loop_tree =
- LoopFinder::BuildLoopTree(data->jsgraph()->graph(), temp_zone);
+ LoopTree* loop_tree = LoopFinder::BuildLoopTree(
+ data->jsgraph()->graph(), &data->info()->tick_counter(), temp_zone);
LoopPeeler(data->graph(), data->common(), loop_tree, temp_zone,
data->source_positions(), data->node_origins())
.PeelInnerLoopsOfTree();
@@ -1346,6 +1486,7 @@ struct GenericLoweringPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
JSGenericLowering generic_lowering(data->jsgraph(), &graph_reducer);
AddReducer(data, &graph_reducer, &generic_lowering);
@@ -1358,6 +1499,7 @@ struct EarlyOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
@@ -1384,7 +1526,8 @@ struct ControlFlowOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
ControlFlowOptimizer optimizer(data->graph(), data->common(),
- data->machine(), temp_zone);
+ data->machine(),
+ &data->info()->tick_counter(), temp_zone);
optimizer.Optimize();
}
};
@@ -1406,8 +1549,9 @@ struct EffectControlLinearizationPhase {
// fix the effect and control flow for nodes with low-level side
// effects (such as changing representation to tagged or
// 'floating' allocation regions.)
- Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
- Scheduler::kTempSchedule);
+ Schedule* schedule = Scheduler::ComputeSchedule(
+ temp_zone, data->graph(), Scheduler::kTempSchedule,
+ &data->info()->tick_counter());
if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
TraceSchedule(data->info(), data, schedule,
"effect linearization schedule");
@@ -1433,6 +1577,7 @@ struct EffectControlLinearizationPhase {
// doing a common operator reducer and dead code elimination just before
// it, to eliminate conditional deopts with a constant condition.
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
@@ -1455,7 +1600,8 @@ struct StoreStoreEliminationPhase {
data->jsgraph()->GetCachedNodes(&roots);
trimmer.TrimGraph(roots.begin(), roots.end());
- StoreStoreElimination::Run(data->jsgraph(), temp_zone);
+ StoreStoreElimination::Run(data->jsgraph(), &data->info()->tick_counter(),
+ temp_zone);
}
};
@@ -1464,6 +1610,7 @@ struct LoadEliminationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
@@ -1513,7 +1660,7 @@ struct MemoryOptimizationPhase {
data->info()->is_allocation_folding_enabled()
? MemoryOptimizer::AllocationFolding::kDoAllocationFolding
: MemoryOptimizer::AllocationFolding::kDontAllocationFolding,
- data->debug_name());
+ data->debug_name(), &data->info()->tick_counter());
optimizer.Optimize();
}
};
@@ -1523,6 +1670,7 @@ struct LateOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
@@ -1555,6 +1703,7 @@ struct MachineOperatorOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
@@ -1565,11 +1714,38 @@ struct MachineOperatorOptimizationPhase {
}
};
+struct CsaEarlyOptimizationPhase {
+ static const char* phase_name() { return "V8.CSAEarlyOptimization"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
+ data->jsgraph()->Dead());
+ BranchElimination branch_condition_elimination(&graph_reducer,
+ data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common(), temp_zone);
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->broker(), data->common(),
+ data->machine(), temp_zone);
+ ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
+ CsaLoadElimination load_elimination(&graph_reducer, data->jsgraph(),
+ temp_zone);
+ AddReducer(data, &graph_reducer, &branch_condition_elimination);
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &common_reducer);
+ AddReducer(data, &graph_reducer, &value_numbering);
+ AddReducer(data, &graph_reducer, &load_elimination);
+ graph_reducer.ReduceGraph();
+ }
+};
+
struct CsaOptimizationPhase {
static const char* phase_name() { return "V8.CSAOptimization"; }
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
@@ -1621,9 +1797,10 @@ struct ComputeSchedulePhase {
void Run(PipelineData* data, Zone* temp_zone) {
Schedule* schedule = Scheduler::ComputeSchedule(
- temp_zone, data->graph(), data->info()->is_splitting_enabled()
- ? Scheduler::kSplitNodes
- : Scheduler::kNoFlags);
+ temp_zone, data->graph(),
+ data->info()->is_splitting_enabled() ? Scheduler::kSplitNodes
+ : Scheduler::kNoFlags,
+ &data->info()->tick_counter());
if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
data->set_schedule(schedule);
}
@@ -1671,6 +1848,7 @@ struct InstructionSelectionPhase {
data->info()->switch_jump_table_enabled()
? InstructionSelector::kEnableSwitchJumpTable
: InstructionSelector::kDisableSwitchJumpTable,
+ &data->info()->tick_counter(),
data->info()->is_source_positions_enabled()
? InstructionSelector::kAllSourcePositions
: InstructionSelector::kCallSourcePositions,
@@ -1920,7 +2098,8 @@ struct PrintGraphPhase {
Schedule* schedule = data->schedule();
if (schedule == nullptr) {
schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
- Scheduler::kNoFlags);
+ Scheduler::kNoFlags,
+ &info->tick_counter());
}
AllowHandleDereference allow_deref;
@@ -2089,6 +2268,11 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
RunPrintAndVerify(EscapeAnalysisPhase::phase_name());
}
+ if (FLAG_assert_types) {
+ Run<TypeAssertionsPhase>();
+ RunPrintAndVerify(TypeAssertionsPhase::phase_name());
+ }
+
// Perform simplified lowering. This has to run w/o the Typer decorator,
// because we cannot compute meaningful types anyways, and the computed types
// might even conflict with the representation/truncation logic.
@@ -2201,6 +2385,9 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
pipeline.Run<PrintGraphPhase>("V8.TFMachineCode");
}
+ pipeline.Run<CsaEarlyOptimizationPhase>();
+ pipeline.RunPrintAndVerify(CsaEarlyOptimizationPhase::phase_name(), true);
+
// Optimize memory access and allocation operations.
pipeline.Run<MemoryOptimizationPhase>();
pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
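
CsaLoadElimination is new in the CSA/builtin pipeline; the phase above runs it together with branch elimination, dead-code elimination, common-operator reduction and value numbering, before the memory optimizer. Conceptually it removes the second of two identical raw loads when no intervening store can alias them; the snippet below is illustrative pseudo-IR with hypothetical helper names, not V8 source.

    // Illustrative only: the helpers stand in for the machine-level loads
    // that CSA-generated code emits.
    Node* a = LoadWord32(object, offset);  // first load from the field
    Node* b = LoadWord32(object, offset);  // same object/offset, no store in between
    Node* r = Int32Add(a, b);              // load elimination may rewrite b -> a
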
@@ -2331,58 +2518,6 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
}
// static
-MaybeHandle<Code> Pipeline::GenerateCodeForWasmHeapStub(
- Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
- Code::Kind kind, const char* debug_name, const AssemblerOptions& options,
- SourcePositionTable* source_positions) {
- OptimizedCompilationInfo info(CStrVector(debug_name), graph->zone(), kind);
- // Construct a pipeline for scheduling and code generation.
- ZoneStats zone_stats(isolate->allocator());
- NodeOriginTable* node_positions = new (graph->zone()) NodeOriginTable(graph);
- PipelineData data(&zone_stats, &info, isolate, graph, nullptr,
- source_positions, node_positions, nullptr, options);
- std::unique_ptr<PipelineStatistics> pipeline_statistics;
- if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
- pipeline_statistics.reset(new PipelineStatistics(
- &info, isolate->GetTurboStatistics(), &zone_stats));
- pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
- }
-
- PipelineImpl pipeline(&data);
-
- if (info.trace_turbo_json_enabled() ||
- info.trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(data.GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "---------------------------------------------------\n"
- << "Begin compiling method " << info.GetDebugName().get()
- << " using TurboFan" << std::endl;
- }
-
- if (info.trace_turbo_graph_enabled()) { // Simple textual RPO.
- StdoutStream{} << "-- wasm stub " << Code::Kind2String(kind) << " graph -- "
- << std::endl
- << AsRPO(*graph);
- }
-
- if (info.trace_turbo_json_enabled()) {
- TurboJsonFile json_of(&info, std::ios_base::trunc);
- json_of << "{\"function\":\"" << info.GetDebugName().get()
- << "\", \"source\":\"\",\n\"phases\":[";
- }
-
- pipeline.RunPrintAndVerify("V8.WasmMachineCode", true);
- pipeline.ComputeScheduledGraph();
-
- Handle<Code> code;
- if (pipeline.GenerateCode(call_descriptor).ToHandle(&code) &&
- pipeline.CommitDependencies(code)) {
- return code;
- }
- return MaybeHandle<Code>();
-}
-
-// static
MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
OptimizedCompilationInfo* info, Isolate* isolate,
std::unique_ptr<JSHeapBroker>* out_broker) {
@@ -2449,11 +2584,11 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
}
// static
-OptimizedCompilationJob* Pipeline::NewCompilationJob(
+std::unique_ptr<OptimizedCompilationJob> Pipeline::NewCompilationJob(
Isolate* isolate, Handle<JSFunction> function, bool has_script) {
Handle<SharedFunctionInfo> shared =
handle(function->shared(), function->GetIsolate());
- return new PipelineCompilationJob(isolate, shared, function);
+ return base::make_unique<PipelineCompilationJob>(isolate, shared, function);
}
// static
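
With the return type changed to std::unique_ptr, ownership of the job is explicit at the call site. The sketch below shows a hypothetical caller; the compiler.cc side that actually consumes this is outside this diff, and the dispatcher hand-off is an assumed name.

    // Hypothetical call site, not part of the patch: no manual delete is
    // needed if preparation fails or the job is abandoned.
    std::unique_ptr<OptimizedCompilationJob> job =
        Pipeline::NewCompilationJob(isolate, function, has_script);
    if (job->PrepareJob(isolate) != CompilationJob::SUCCEEDED) {
      return;  // job destroyed automatically
    }
    dispatcher->QueueForExecution(std::move(job));  // hypothetical hand-off
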
@@ -2490,13 +2625,14 @@ void Pipeline::GenerateCodeForWasmFunction(
pipeline.RunPrintAndVerify("V8.WasmMachineCode", true);
data.BeginPhaseKind("V8.WasmOptimization");
- const bool is_asm_js = module->origin == wasm::kAsmJsOrigin;
+ const bool is_asm_js = is_asmjs_module(module);
if (FLAG_turbo_splitting && !is_asm_js) {
data.info()->MarkAsSplittingEnabled();
}
if (FLAG_wasm_opt || is_asm_js) {
PipelineRunScope scope(&data, "V8.WasmFullOptimization");
GraphReducer graph_reducer(scope.zone(), data.graph(),
+ &data.info()->tick_counter(),
data.mcgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data.graph(),
data.common(), scope.zone());
@@ -2515,6 +2651,7 @@ void Pipeline::GenerateCodeForWasmFunction(
} else {
PipelineRunScope scope(&data, "V8.WasmBaseOptimization");
GraphReducer graph_reducer(scope.zone(), data.graph(),
+ &data.info()->tick_counter(),
data.mcgraph()->Dead());
ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
AddReducer(&data, &graph_reducer, &value_numbering);
@@ -2870,8 +3007,9 @@ bool PipelineImpl::SelectInstructionsAndAssemble(
}
MaybeHandle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
- if (!SelectInstructionsAndAssemble(call_descriptor))
+ if (!SelectInstructionsAndAssemble(call_descriptor)) {
return MaybeHandle<Code>();
+ }
return FinalizeCode();
}
@@ -2928,6 +3066,9 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
if (data->info()->is_turbo_preprocess_ranges()) {
flags |= RegisterAllocationFlag::kTurboPreprocessRanges;
}
+ if (data->info()->trace_turbo_allocation_enabled()) {
+ flags |= RegisterAllocationFlag::kTraceAllocation;
+ }
data->InitializeRegisterAllocationData(config, call_descriptor, flags);
if (info()->is_osr()) data->osr_helper()->SetupFrame(data->frame());