summaryrefslogtreecommitdiff
path: root/deps/v8/src/torque/implementation-visitor.cc
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/src/torque/implementation-visitor.cc')
-rw-r--r--deps/v8/src/torque/implementation-visitor.cc663
1 files changed, 326 insertions, 337 deletions
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index a0aeeee81b..8f36afd020 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -10,6 +10,7 @@
#include "src/torque/implementation-visitor.h"
#include "src/torque/parameter-difference.h"
#include "src/torque/server-data.h"
+#include "src/torque/type-inference.h"
#include "src/torque/type-visitor.h"
namespace v8 {
@@ -107,7 +108,8 @@ void ImplementationVisitor::EndCSAFiles() {
}
void ImplementationVisitor::Visit(NamespaceConstant* decl) {
- Signature signature{{}, base::nullopt, {{}, false}, 0, decl->type(), {}};
+ Signature signature{{}, base::nullopt, {{}, false}, 0, decl->type(),
+ {}, false};
BindingsManagersScope bindings_managers_scope;
@@ -466,13 +468,13 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
: "UncheckedCast<Object>(Parameter(Descriptor::kReceiver))")
<< ";\n";
source_out() << "USE(" << generated_name << ");\n";
- expected_type = TypeOracle::GetObjectType();
+ expected_type = TypeOracle::GetJSAnyType();
} else if (param_name == "newTarget") {
source_out() << " TNode<Object> " << generated_name
<< " = UncheckedCast<Object>(Parameter("
<< "Descriptor::kJSNewTarget));\n";
source_out() << "USE(" << generated_name << ");\n";
- expected_type = TypeOracle::GetObjectType();
+ expected_type = TypeOracle::GetJSAnyType();
} else if (param_name == "target") {
source_out() << " TNode<JSFunction> " << generated_name
<< " = UncheckedCast<JSFunction>(Parameter("
@@ -646,24 +648,8 @@ VisitResult ImplementationVisitor::Visit(ConditionalExpression* expr) {
}
VisitResult ImplementationVisitor::Visit(LogicalOrExpression* expr) {
- VisitResult left_result;
- {
- Block* false_block = assembler().NewBlock(assembler().CurrentStack());
- Binding<LocalLabel> false_binding{&LabelBindingsManager::Get(),
- kFalseLabelName, LocalLabel{false_block}};
- left_result = Visit(expr->left);
- if (left_result.type()->IsBool()) {
- Block* true_block = LookupSimpleLabel(kTrueLabelName);
- assembler().Branch(true_block, false_block);
- assembler().Bind(false_block);
- } else if (left_result.type()->IsNever()) {
- assembler().Bind(false_block);
- } else if (!left_result.type()->IsConstexprBool()) {
- ReportError(
- "expected type bool, constexpr bool, or never on left-hand side of "
- "operator ||");
- }
- }
+ StackScope outer_scope(this);
+ VisitResult left_result = Visit(expr->left);
if (left_result.type()->IsConstexprBool()) {
VisitResult right_result = Visit(expr->right);
@@ -677,38 +663,34 @@ VisitResult ImplementationVisitor::Visit(LogicalOrExpression* expr) {
" || " + right_result.constexpr_value() + ")");
}
- VisitResult right_result = Visit(expr->right);
- if (right_result.type()->IsBool()) {
- Block* true_block = LookupSimpleLabel(kTrueLabelName);
- Block* false_block = LookupSimpleLabel(kFalseLabelName);
- assembler().Branch(true_block, false_block);
- return VisitResult::NeverResult();
- } else if (!right_result.type()->IsNever()) {
- ReportError(
- "expected type bool or never on right-hand side of operator ||");
+ Block* true_block = assembler().NewBlock();
+ Block* false_block = assembler().NewBlock();
+ Block* done_block = assembler().NewBlock();
+
+ left_result = GenerateImplicitConvert(TypeOracle::GetBoolType(), left_result);
+ GenerateBranch(left_result, true_block, false_block);
+
+ assembler().Bind(true_block);
+ VisitResult true_result = GenerateBoolConstant(true);
+ assembler().Goto(done_block);
+
+ assembler().Bind(false_block);
+ VisitResult false_result;
+ {
+ StackScope false_block_scope(this);
+ false_result = false_block_scope.Yield(
+ GenerateImplicitConvert(TypeOracle::GetBoolType(), Visit(expr->right)));
}
- return right_result;
+ assembler().Goto(done_block);
+
+ assembler().Bind(done_block);
+ DCHECK_EQ(true_result, false_result);
+ return outer_scope.Yield(true_result);
}
VisitResult ImplementationVisitor::Visit(LogicalAndExpression* expr) {
- VisitResult left_result;
- {
- Block* true_block = assembler().NewBlock(assembler().CurrentStack());
- Binding<LocalLabel> false_binding{&LabelBindingsManager::Get(),
- kTrueLabelName, LocalLabel{true_block}};
- left_result = Visit(expr->left);
- if (left_result.type()->IsBool()) {
- Block* false_block = LookupSimpleLabel(kFalseLabelName);
- assembler().Branch(true_block, false_block);
- assembler().Bind(true_block);
- } else if (left_result.type()->IsNever()) {
- assembler().Bind(true_block);
- } else if (!left_result.type()->IsConstexprBool()) {
- ReportError(
- "expected type bool, constexpr bool, or never on left-hand side of "
- "operator &&");
- }
- }
+ StackScope outer_scope(this);
+ VisitResult left_result = Visit(expr->left);
if (left_result.type()->IsConstexprBool()) {
VisitResult right_result = Visit(expr->right);
@@ -722,17 +704,29 @@ VisitResult ImplementationVisitor::Visit(LogicalAndExpression* expr) {
" && " + right_result.constexpr_value() + ")");
}
- VisitResult right_result = Visit(expr->right);
- if (right_result.type()->IsBool()) {
- Block* true_block = LookupSimpleLabel(kTrueLabelName);
- Block* false_block = LookupSimpleLabel(kFalseLabelName);
- assembler().Branch(true_block, false_block);
- return VisitResult::NeverResult();
- } else if (!right_result.type()->IsNever()) {
- ReportError(
- "expected type bool or never on right-hand side of operator &&");
+ Block* true_block = assembler().NewBlock();
+ Block* false_block = assembler().NewBlock();
+ Block* done_block = assembler().NewBlock();
+
+ left_result = GenerateImplicitConvert(TypeOracle::GetBoolType(), left_result);
+ GenerateBranch(left_result, true_block, false_block);
+
+ assembler().Bind(true_block);
+ VisitResult true_result;
+ {
+ StackScope true_block_scope(this);
+ true_result = true_block_scope.Yield(
+ GenerateImplicitConvert(TypeOracle::GetBoolType(), Visit(expr->right)));
}
- return right_result;
+ assembler().Goto(done_block);
+
+ assembler().Bind(false_block);
+ VisitResult false_result = GenerateBoolConstant(false);
+ assembler().Goto(done_block);
+
+ assembler().Bind(done_block);
+ DCHECK_EQ(true_result, false_result);
+ return outer_scope.Yield(true_result);
}
VisitResult ImplementationVisitor::Visit(IncrementDecrementExpression* expr) {
@@ -1110,29 +1104,6 @@ const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
return TypeOracle::GetNeverType();
}
-VisitResult ImplementationVisitor::TemporaryUninitializedStruct(
- const StructType* struct_type, const std::string& reason) {
- StackRange range = assembler().TopRange(0);
- for (const Field& f : struct_type->fields()) {
- if (const StructType* struct_type =
- StructType::DynamicCast(f.name_and_type.type)) {
- range.Extend(
- TemporaryUninitializedStruct(struct_type, reason).stack_range());
- } else {
- std::string descriptor = "uninitialized field '" + f.name_and_type.name +
- "' declared at " + PositionAsString(f.pos) +
- " (" + reason + ")";
- TypeVector lowered_types = LowerType(f.name_and_type.type);
- for (const Type* type : lowered_types) {
- assembler().Emit(PushUninitializedInstruction{
- TypeOracle::GetTopType(descriptor, type)});
- }
- range.Extend(assembler().TopRange(lowered_types.size()));
- }
- }
- return VisitResult(struct_type, range);
-}
-
VisitResult ImplementationVisitor::Visit(TryLabelExpression* expr) {
size_t parameter_count = expr->label_block->parameters.names.size();
std::vector<VisitResult> parameters;
@@ -1211,15 +1182,38 @@ VisitResult ImplementationVisitor::Visit(StatementExpression* expr) {
return VisitResult{Visit(expr->statement), assembler().TopRange(0)};
}
+void ImplementationVisitor::CheckInitializersWellformed(
+ const std::string& aggregate_name,
+ const std::vector<Field>& aggregate_fields,
+ const std::vector<NameAndExpression>& initializers,
+ bool ignore_first_field) {
+ size_t fields_offset = ignore_first_field ? 1 : 0;
+ size_t fields_size = aggregate_fields.size() - fields_offset;
+ for (size_t i = 0; i < std::min(fields_size, initializers.size()); i++) {
+ const std::string& field_name =
+ aggregate_fields[i + fields_offset].name_and_type.name;
+ Identifier* found_name = initializers[i].name;
+ if (field_name != found_name->value) {
+ Error("Expected field name \"", field_name, "\" instead of \"",
+ found_name->value, "\"")
+ .Position(found_name->pos)
+ .Throw();
+ }
+ }
+ if (fields_size != initializers.size()) {
+ ReportError("expected ", fields_size, " initializers for ", aggregate_name,
+ " found ", initializers.size());
+ }
+}
+
InitializerResults ImplementationVisitor::VisitInitializerResults(
- const AggregateType* current_aggregate,
+ const ClassType* class_type,
const std::vector<NameAndExpression>& initializers) {
InitializerResults result;
for (const NameAndExpression& initializer : initializers) {
result.names.push_back(initializer.name);
Expression* e = initializer.expression;
- const Field& field =
- current_aggregate->LookupField(initializer.name->value);
+ const Field& field = class_type->LookupField(initializer.name->value);
auto field_index = field.index;
if (SpreadExpression* s = SpreadExpression::DynamicCast(e)) {
if (!field_index) {
@@ -1238,54 +1232,30 @@ InitializerResults ImplementationVisitor::VisitInitializerResults(
return result;
}
-size_t ImplementationVisitor::InitializeAggregateHelper(
- const AggregateType* aggregate_type, VisitResult allocate_result,
+void ImplementationVisitor::InitializeClass(
+ const ClassType* class_type, VisitResult allocate_result,
const InitializerResults& initializer_results) {
- const ClassType* current_class = ClassType::DynamicCast(aggregate_type);
- size_t current = 0;
- if (current_class) {
- const ClassType* super = current_class->GetSuperClass();
- if (super) {
- current = InitializeAggregateHelper(super, allocate_result,
- initializer_results);
- }
+ if (const ClassType* super = class_type->GetSuperClass()) {
+ InitializeClass(super, allocate_result, initializer_results);
}
- for (Field f : aggregate_type->fields()) {
- if (current == initializer_results.field_value_map.size()) {
- ReportError("insufficient number of initializers for ",
- aggregate_type->name());
- }
+ for (Field f : class_type->fields()) {
VisitResult current_value =
initializer_results.field_value_map.at(f.name_and_type.name);
- Identifier* fieldname = initializer_results.names[current];
- if (fieldname->value != f.name_and_type.name) {
- CurrentSourcePosition::Scope scope(fieldname->pos);
- ReportError("Expected fieldname \"", f.name_and_type.name,
- "\" instead of \"", fieldname->value, "\"");
- }
- if (aggregate_type->IsClassType()) {
- if (f.index) {
- InitializeFieldFromSpread(allocate_result, f, initializer_results);
- } else {
- allocate_result.SetType(aggregate_type);
- GenerateCopy(allocate_result);
- assembler().Emit(CreateFieldReferenceInstruction{
- ClassType::cast(aggregate_type), f.name_and_type.name});
- VisitResult heap_reference(
- TypeOracle::GetReferenceType(f.name_and_type.type),
- assembler().TopRange(2));
- GenerateAssignToLocation(
- LocationReference::HeapReference(heap_reference), current_value);
- }
+ if (f.index) {
+ InitializeFieldFromSpread(allocate_result, f, initializer_results);
} else {
- LocationReference struct_field_ref = LocationReference::VariableAccess(
- ProjectStructField(allocate_result, f.name_and_type.name));
- GenerateAssignToLocation(struct_field_ref, current_value);
+ allocate_result.SetType(class_type);
+ GenerateCopy(allocate_result);
+ assembler().Emit(CreateFieldReferenceInstruction{
+ ClassType::cast(class_type), f.name_and_type.name});
+ VisitResult heap_reference(
+ TypeOracle::GetReferenceType(f.name_and_type.type),
+ assembler().TopRange(2));
+ GenerateAssignToLocation(LocationReference::HeapReference(heap_reference),
+ current_value);
}
- ++current;
}
- return current;
}
void ImplementationVisitor::InitializeFieldFromSpread(
@@ -1304,17 +1274,6 @@ void ImplementationVisitor::InitializeFieldFromSpread(
{field.aggregate, index.type, iterator.type()});
}
-void ImplementationVisitor::InitializeAggregate(
- const AggregateType* aggregate_type, VisitResult allocate_result,
- const InitializerResults& initializer_results) {
- size_t consumed_initializers = InitializeAggregateHelper(
- aggregate_type, allocate_result, initializer_results);
- if (consumed_initializers != initializer_results.field_value_map.size()) {
- ReportError("more initializers than fields present in ",
- aggregate_type->name());
- }
-}
-
VisitResult ImplementationVisitor::AddVariableObjectSize(
VisitResult object_size, const ClassType* current_class,
const InitializerResults& initializer_results) {
@@ -1397,6 +1356,11 @@ VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
initializer_results.field_value_map[map_field.name_and_type.name] =
object_map;
}
+
+ CheckInitializersWellformed(class_type->name(),
+ class_type->ComputeAllFields(),
+ expr->initializers, !class_type->IsExtern());
+
Arguments size_arguments;
size_arguments.parameters.push_back(object_map);
VisitResult object_size = GenerateCall("%GetAllocationBaseSize",
@@ -1411,7 +1375,7 @@ VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
GenerateCall("%Allocate", allocate_arguments, {class_type}, false);
DCHECK(allocate_result.IsOnStack());
- InitializeAggregate(class_type, allocate_result, initializer_results);
+ InitializeClass(class_type, allocate_result, initializer_results);
return stack_scope.Yield(allocate_result);
}
@@ -1582,7 +1546,9 @@ namespace {
void FailCallableLookup(const std::string& reason, const QualifiedName& name,
const TypeVector& parameter_types,
const std::vector<Binding<LocalLabel>*>& labels,
- const std::vector<Signature>& candidates) {
+ const std::vector<Signature>& candidates,
+ const std::vector<std::tuple<Generic*, const char*>>
+ inapplicable_generics) {
std::stringstream stream;
stream << "\n" << reason << ": \n " << name << "(" << parameter_types << ")";
if (labels.size() != 0) {
@@ -1596,10 +1562,20 @@ void FailCallableLookup(const std::string& reason, const QualifiedName& name,
stream << "\n " << name;
PrintSignature(stream, signature, false);
}
+ if (inapplicable_generics.size() != 0) {
+ stream << "\nfailed to instantiate all of these generic declarations:";
+ for (auto& failure : inapplicable_generics) {
+ Generic* generic;
+ const char* reason;
+ std::tie(generic, reason) = failure;
+ stream << "\n " << generic->name() << " defined at "
+ << generic->Position() << ":\n " << reason << "\n";
+ }
+ }
ReportError(stream.str());
}
-Callable* GetOrCreateSpecialization(const SpecializationKey& key) {
+Callable* GetOrCreateSpecialization(const SpecializationKey<Generic>& key) {
if (base::Optional<Callable*> specialization =
key.generic->specializations().Get(key.specialized_types)) {
return *specialization;
@@ -1655,16 +1631,20 @@ Callable* ImplementationVisitor::LookupCallable(
std::vector<Declarable*> overloads;
std::vector<Signature> overload_signatures;
+ std::vector<std::tuple<Generic*, const char*>> inapplicable_generics;
for (auto* declarable : declaration_container) {
if (Generic* generic = Generic::DynamicCast(declarable)) {
- base::Optional<TypeVector> inferred_specialization_types =
- generic->InferSpecializationTypes(specialization_types,
- parameter_types);
- if (!inferred_specialization_types) continue;
+ TypeArgumentInference inference = generic->InferSpecializationTypes(
+ specialization_types, parameter_types);
+ if (inference.HasFailed()) {
+ inapplicable_generics.push_back(
+ std::make_tuple(generic, inference.GetFailureReason()));
+ continue;
+ }
overloads.push_back(generic);
overload_signatures.push_back(
DeclarationVisitor::MakeSpecializedSignature(
- SpecializationKey{generic, *inferred_specialization_types}));
+ SpecializationKey<Generic>{generic, inference.GetResult()}));
} else if (Callable* callable = Callable::DynamicCast(declarable)) {
overloads.push_back(callable);
overload_signatures.push_back(callable->signature());
@@ -1674,16 +1654,12 @@ Callable* ImplementationVisitor::LookupCallable(
std::vector<size_t> candidates;
for (size_t i = 0; i < overloads.size(); ++i) {
const Signature& signature = overload_signatures[i];
- bool try_bool_context = labels.size() == 0 &&
- signature.return_type == TypeOracle::GetNeverType();
- if (IsCompatibleSignature(signature, parameter_types, labels.size()) ||
- (try_bool_context &&
- IsCompatibleSignature(signature, parameter_types, 2))) {
+ if (IsCompatibleSignature(signature, parameter_types, labels.size())) {
candidates.push_back(i);
}
}
- if (overloads.empty()) {
+ if (overloads.empty() && inapplicable_generics.empty()) {
if (silence_errors) return nullptr;
std::stringstream stream;
stream << "no matching declaration found for " << name;
@@ -1691,7 +1667,8 @@ Callable* ImplementationVisitor::LookupCallable(
} else if (candidates.empty()) {
if (silence_errors) return nullptr;
FailCallableLookup("cannot find suitable callable with name", name,
- parameter_types, labels, overload_signatures);
+ parameter_types, labels, overload_signatures,
+ inapplicable_generics);
}
auto is_better_candidate = [&](size_t a, size_t b) {
@@ -1712,14 +1689,15 @@ Callable* ImplementationVisitor::LookupCallable(
candidate_signatures.push_back(overload_signatures[i]);
}
FailCallableLookup("ambiguous callable ", name, parameter_types, labels,
- candidate_signatures);
+ candidate_signatures, inapplicable_generics);
}
}
if (Generic* generic = Generic::DynamicCast(overloads[best])) {
+ TypeArgumentInference inference = generic->InferSpecializationTypes(
+ specialization_types, parameter_types);
result = GetOrCreateSpecialization(
- SpecializationKey{generic, *generic->InferSpecializationTypes(
- specialization_types, parameter_types)});
+ SpecializationKey<Generic>{generic, inference.GetResult()});
} else {
result = Callable::cast(overloads[best]);
}
@@ -1749,14 +1727,13 @@ Callable* ImplementationVisitor::LookupCallable(
}
Method* ImplementationVisitor::LookupMethod(
- const std::string& name, LocationReference this_reference,
+ const std::string& name, const AggregateType* receiver_type,
const Arguments& arguments, const TypeVector& specialization_types) {
TypeVector types(arguments.parameters.ComputeTypeVector());
- types.insert(types.begin(), this_reference.ReferencedType());
- return Method::cast(LookupCallable(
- {{}, name},
- AggregateType::cast(this_reference.ReferencedType())->Methods(name),
- types, arguments.labels, specialization_types));
+ types.insert(types.begin(), receiver_type);
+ return Method::cast(LookupCallable({{}, name}, receiver_type->Methods(name),
+ types, arguments.labels,
+ specialization_types));
}
const Type* ImplementationVisitor::GetCommonType(const Type* left,
@@ -1783,24 +1760,36 @@ VisitResult ImplementationVisitor::GenerateCopy(const VisitResult& to_copy) {
VisitResult ImplementationVisitor::Visit(StructExpression* expr) {
StackScope stack_scope(this);
- const Type* raw_type = TypeVisitor::ComputeType(expr->type);
- if (!raw_type->IsStructType()) {
- ReportError(*raw_type, " is not a struct but used like one");
- }
- const StructType* struct_type = StructType::cast(raw_type);
+ auto& initializers = expr->initializers;
+ std::vector<VisitResult> values;
+ std::vector<const Type*> term_argument_types;
+ values.reserve(initializers.size());
+ term_argument_types.reserve(initializers.size());
- InitializerResults initialization_results =
- ImplementationVisitor::VisitInitializerResults(struct_type,
- expr->initializers);
+ // Compute values and types of all initializer arguments
+ for (const NameAndExpression& initializer : initializers) {
+ VisitResult value = Visit(initializer.expression);
+ values.push_back(value);
+ term_argument_types.push_back(value.type());
+ }
- // Push uninitialized 'this'
- VisitResult result = TemporaryUninitializedStruct(
- struct_type, "it's not initialized in the struct " + struct_type->name());
+ // Compute and check struct type from given struct name and argument types
+ const StructType* struct_type = TypeVisitor::ComputeTypeForStructExpression(
+ expr->type, term_argument_types);
+ CheckInitializersWellformed(struct_type->name(), struct_type->fields(),
+ initializers);
- InitializeAggregate(struct_type, result, initialization_results);
+ // Implicitly convert values and thereby build the struct on the stack
+ StackRange struct_range = assembler().TopRange(0);
+ auto& fields = struct_type->fields();
+ for (size_t i = 0; i < values.size(); i++) {
+ values[i] =
+ GenerateImplicitConvert(fields[i].name_and_type.type, values[i]);
+ struct_range.Extend(values[i].stack_range());
+ }
- return stack_scope.Yield(result);
+ return stack_scope.Yield(VisitResult(struct_type, struct_range));
}
LocationReference ImplementationVisitor::GetLocationReference(
@@ -1865,7 +1854,33 @@ LocationReference ImplementationVisitor::GetLocationReference(
LanguageServerData::AddDefinition(expr->field->pos, field.pos);
}
if (field.index) {
- return LocationReference::IndexedFieldAccess(object_result, fieldname);
+ assembler().Emit(
+ CreateFieldReferenceInstruction{object_result.type(), fieldname});
+ // Fetch the length from the object
+ {
+ StackScope length_scope(this);
+ // Get a reference to the length
+ const Field* index_field = field.index.value();
+ GenerateCopy(object_result);
+ assembler().Emit(CreateFieldReferenceInstruction{
+ object_result.type(), index_field->name_and_type.name});
+ VisitResult length_reference(
+ TypeOracle::GetReferenceType(index_field->name_and_type.type),
+ assembler().TopRange(2));
+
+ // Load the length from the reference and convert it to intptr
+ VisitResult length = GenerateFetchFromLocation(
+ LocationReference::HeapReference(length_reference));
+ VisitResult converted_length =
+ GenerateCall("Convert", {{length}, {}},
+ {TypeOracle::GetIntPtrType(), length.type()}, false);
+ DCHECK_EQ(converted_length.stack_range().Size(), 1);
+ length_scope.Yield(converted_length);
+ }
+ const Type* slice_type =
+ TypeOracle::GetSliceType(field.name_and_type.type);
+ return LocationReference::HeapSlice(
+ VisitResult(slice_type, assembler().TopRange(3)));
} else {
assembler().Emit(
CreateFieldReferenceInstruction{*class_type, fieldname});
@@ -1883,8 +1898,13 @@ LocationReference ImplementationVisitor::GetLocationReference(
ElementAccessExpression* expr) {
LocationReference reference = GetLocationReference(expr->array);
VisitResult index = Visit(expr->index);
- if (reference.IsIndexedFieldAccess()) {
- return LocationReference::IndexedFieldIndexedAccess(reference, index);
+ if (reference.IsHeapSlice()) {
+ Arguments arguments{{index}, {}};
+ const AggregateType* slice_type =
+ AggregateType::cast(reference.heap_slice().type());
+ Method* method = LookupMethod("AtIndex", slice_type, arguments, {});
+ return LocationReference::HeapReference(
+ GenerateCall(method, reference, arguments, {}, false));
} else {
return LocationReference::ArrayAccess(GenerateFetchFromLocation(reference),
index);
@@ -1927,8 +1947,9 @@ LocationReference ImplementationVisitor::GetLocationReference(
}
if (expr->generic_arguments.size() != 0) {
Generic* generic = Declarations::LookupUniqueGeneric(name);
- Callable* specialization = GetOrCreateSpecialization(SpecializationKey{
- generic, TypeVisitor::ComputeTypeVector(expr->generic_arguments)});
+ Callable* specialization =
+ GetOrCreateSpecialization(SpecializationKey<Generic>{
+ generic, TypeVisitor::ComputeTypeVector(expr->generic_arguments)});
if (Builtin* builtin = Builtin::DynamicCast(specialization)) {
DCHECK(!builtin->IsExternal());
return LocationReference::Temporary(GetBuiltinCode(builtin),
@@ -1963,8 +1984,8 @@ LocationReference ImplementationVisitor::GetLocationReference(
LocationReference ImplementationVisitor::GetLocationReference(
DereferenceExpression* expr) {
VisitResult ref = Visit(expr->reference);
- const ReferenceType* type = ReferenceType::DynamicCast(ref.type());
- if (!type) {
+ if (!StructType::MatchUnaryGeneric(ref.type(),
+ TypeOracle::GetReferenceGeneric())) {
ReportError("Operator * expects a reference but found a value of type ",
*ref.type());
}
@@ -1983,7 +2004,7 @@ VisitResult ImplementationVisitor::GenerateFetchFromLocation(
DCHECK_EQ(1, LoweredSlotCount(reference.ReferencedType()));
return VisitResult(reference.ReferencedType(), assembler().TopRange(1));
} else {
- if (reference.IsIndexedFieldAccess()) {
+ if (reference.IsHeapSlice()) {
ReportError(
"fetching a value directly from an indexed field isn't allowed");
}
@@ -2011,12 +2032,19 @@ void ImplementationVisitor::GenerateAssignToLocation(
if (reference.binding()) {
(*reference.binding())->SetWritten();
}
- } else if (reference.IsIndexedFieldAccess()) {
+ } else if (reference.IsHeapSlice()) {
ReportError("assigning a value directly to an indexed field isn't allowed");
} else if (reference.IsHeapReference()) {
const Type* referenced_type = reference.ReferencedType();
GenerateCopy(reference.heap_reference());
- GenerateImplicitConvert(referenced_type, assignment_value);
+ VisitResult converted_assignment_value =
+ GenerateImplicitConvert(referenced_type, assignment_value);
+ if (referenced_type == TypeOracle::GetFloat64Type()) {
+ VisitResult silenced_float_value =
+ GenerateCall("Float64SilenceNaN", {{assignment_value}, {}});
+ assembler().Poke(converted_assignment_value.stack_range(),
+ silenced_float_value.stack_range(), referenced_type);
+ }
assembler().Emit(StoreReferenceInstruction{referenced_type});
} else {
DCHECK(reference.IsTemporary());
@@ -2097,27 +2125,20 @@ VisitResult ImplementationVisitor::GenerateCall(
Callable* callable, base::Optional<LocationReference> this_reference,
Arguments arguments, const TypeVector& specialization_types,
bool is_tailcall) {
- // Operators used in a branching context can also be function calls that never
- // return but have a True and False label
- if (arguments.labels.size() == 0 &&
- callable->signature().labels.size() == 2) {
- base::Optional<Binding<LocalLabel>*> true_label =
- TryLookupLabel(kTrueLabelName);
- base::Optional<Binding<LocalLabel>*> false_label =
- TryLookupLabel(kFalseLabelName);
- if (!true_label || !false_label) {
- ReportError(
- callable->ReadableName(),
- " does not return a value, but has to be called in a branching "
- "context (e.g., conditional or if-condition). You can fix this by "
- "adding \"? true : false\".");
+ const Type* return_type = callable->signature().return_type;
+
+ if (is_tailcall) {
+ if (Builtin* builtin = Builtin::DynamicCast(CurrentCallable::Get())) {
+ const Type* outer_return_type = builtin->signature().return_type;
+ if (!return_type->IsSubtypeOf(outer_return_type)) {
+ Error("Cannot tailcall, type of result is ", *return_type,
+ " but should be a subtype of ", *outer_return_type, ".");
+ }
+ } else {
+ Error("Tail calls are only allowed from builtins");
}
- arguments.labels.push_back(*true_label);
- arguments.labels.push_back(*false_label);
}
- const Type* return_type = callable->signature().return_type;
-
std::vector<VisitResult> converted_arguments;
StackRange argument_range = assembler().TopRange(0);
std::vector<std::string> constexpr_arguments;
@@ -2142,8 +2163,8 @@ VisitResult ImplementationVisitor::GenerateCall(
if (this_reference) {
DCHECK(callable->IsMethod());
Method* method = Method::cast(callable);
- // By now, the this reference should either be a variable or
- // a temporary, in both cases the fetch of the VisitResult should succeed.
+ // By now, the this reference should either be a variable, a temporary or
+ // a Slice. In either case the fetch of the VisitResult should succeed.
VisitResult this_value = this_reference->GetVisitResult();
if (method->ShouldBeInlined()) {
if (!this_value.type()->IsSubtypeOf(method->aggregate_type())) {
@@ -2280,9 +2301,10 @@ VisitResult ImplementationVisitor::GenerateCall(
size_t j = 0;
for (auto t : callable->signature().labels[i].types) {
const Type* parameter_type = label->parameter_types[j];
- if (parameter_type != t) {
- ReportError("mismatch of label parameters (expected ", *t, " got ",
- parameter_type, " for parameter ", i + 1, ")");
+ if (!t->IsSubtypeOf(parameter_type)) {
+ ReportError("mismatch of label parameters (label expects ",
+ *parameter_type, " but macro produces ", *t,
+ " for parameter ", i + 1, ")");
}
j++;
}
@@ -2360,6 +2382,7 @@ VisitResult ImplementationVisitor::Visit(CallExpression* expr,
if (auto* loc_expr = LocationExpression::DynamicCast(expr->arguments[0])) {
LocationReference ref = GetLocationReference(loc_expr);
if (ref.IsHeapReference()) return scope.Yield(ref.heap_reference());
+ if (ref.IsHeapSlice()) return scope.Yield(ref.heap_slice());
}
ReportError("Unable to create a heap reference.");
}
@@ -2413,7 +2436,7 @@ VisitResult ImplementationVisitor::Visit(CallMethodExpression* expr) {
DCHECK_EQ(expr->method->namespace_qualification.size(), 0);
QualifiedName qualified_name = QualifiedName(method_name);
Callable* callable = nullptr;
- callable = LookupMethod(method_name, target, arguments, {});
+ callable = LookupMethod(method_name, target_type, arguments, {});
if (GlobalContext::collect_language_server_data()) {
LanguageServerData::AddDefinition(expr->method->name->pos,
callable->IdentifierPosition());
@@ -2429,7 +2452,7 @@ VisitResult ImplementationVisitor::Visit(IntrinsicCallExpression* expr) {
for (Expression* arg : expr->arguments)
arguments.parameters.push_back(Visit(arg));
return scope.Yield(
- GenerateCall(expr->name, arguments, specialization_types, false));
+ GenerateCall(expr->name->value, arguments, specialization_types, false));
}
void ImplementationVisitor::GenerateBranch(const VisitResult& condition,
@@ -2440,32 +2463,20 @@ void ImplementationVisitor::GenerateBranch(const VisitResult& condition,
assembler().Branch(true_block, false_block);
}
-void ImplementationVisitor::GenerateExpressionBranch(
- VisitResultGenerator generator, Block* true_block, Block* false_block) {
- // Conditional expressions can either explicitly return a bit
- // type, or they can be backed by macros that don't return but
- // take a true and false label. By declaring the labels before
- // visiting the conditional expression, those label-based
- // macro conditionals will be able to find them through normal
- // label lookups.
- Binding<LocalLabel> true_binding{&LabelBindingsManager::Get(), kTrueLabelName,
- LocalLabel{true_block}};
- Binding<LocalLabel> false_binding{&LabelBindingsManager::Get(),
- kFalseLabelName, LocalLabel{false_block}};
- StackScope stack_scope(this);
- VisitResult expression_result = generator();
- if (!expression_result.type()->IsNever()) {
- expression_result = stack_scope.Yield(
- GenerateImplicitConvert(TypeOracle::GetBoolType(), expression_result));
- GenerateBranch(expression_result, true_block, false_block);
- }
+VisitResult ImplementationVisitor::GenerateBoolConstant(bool constant) {
+ return GenerateImplicitConvert(TypeOracle::GetBoolType(),
+ VisitResult(TypeOracle::GetConstexprBoolType(),
+ constant ? "true" : "false"));
}
void ImplementationVisitor::GenerateExpressionBranch(Expression* expression,
Block* true_block,
Block* false_block) {
- GenerateExpressionBranch([&]() { return this->Visit(expression); },
- true_block, false_block);
+ StackScope stack_scope(this);
+ VisitResult expression_result = this->Visit(expression);
+ expression_result = stack_scope.Yield(
+ GenerateImplicitConvert(TypeOracle::GetBoolType(), expression_result));
+ GenerateBranch(expression_result, true_block, false_block);
}
VisitResult ImplementationVisitor::GenerateImplicitConvert(
@@ -2530,10 +2541,6 @@ StackRange ImplementationVisitor::LowerParameter(
range.Extend(parameter_range);
}
return range;
- } else if (type->IsReferenceType()) {
- lowered_parameters->Push(parameter_name + ".object");
- lowered_parameters->Push(parameter_name + ".offset");
- return lowered_parameters->TopRange(2);
} else {
lowered_parameters->Push(parameter_name);
return lowered_parameters->TopRange(1);
@@ -2663,70 +2670,6 @@ void ImplementationVisitor::Visit(Declarable* declarable) {
}
}
-namespace {
-class IfDefScope {
- public:
- IfDefScope(std::ostream& os, std::string d) : os_(os), d_(std::move(d)) {
- os_ << "#ifdef " << d_ << "\n";
- }
- ~IfDefScope() { os_ << "#endif // " << d_ << "\n"; }
-
- private:
- std::ostream& os_;
- std::string d_;
-};
-
-class NamespaceScope {
- public:
- NamespaceScope(std::ostream& os,
- std::initializer_list<std::string> namespaces)
- : os_(os), d_(std::move(namespaces)) {
- for (const std::string& s : d_) {
- os_ << "namespace " << s << " {\n";
- }
- }
- ~NamespaceScope() {
- for (auto i = d_.rbegin(); i != d_.rend(); ++i) {
- os_ << "} // namespace " << *i << "\n";
- }
- }
-
- private:
- std::ostream& os_;
- std::vector<std::string> d_;
-};
-
-class IncludeGuardScope {
- public:
- IncludeGuardScope(std::ostream& os, std::string file_name)
- : os_(os),
- d_("V8_GEN_TORQUE_GENERATED_" + CapifyStringWithUnderscores(file_name) +
- "_") {
- os_ << "#ifndef " << d_ << "\n";
- os_ << "#define " << d_ << "\n\n";
- }
- ~IncludeGuardScope() { os_ << "#endif // " << d_ << "\n"; }
-
- private:
- std::ostream& os_;
- std::string d_;
-};
-
-class IncludeObjectMacrosScope {
- public:
- explicit IncludeObjectMacrosScope(std::ostream& os) : os_(os) {
- os_ << "\n// Has to be the last include (doesn't have include guards):\n"
- "#include \"src/objects/object-macros.h\"\n";
- }
- ~IncludeObjectMacrosScope() {
- os_ << "\n#include \"src/objects/object-macros-undef.h\"\n";
- }
-
- private:
- std::ostream& os_;
-};
-} // namespace
-
void ImplementationVisitor::GenerateBuiltinDefinitions(
const std::string& output_directory) {
std::stringstream new_contents_stream;
@@ -2741,7 +2684,7 @@ void ImplementationVisitor::GenerateBuiltinDefinitions(
for (auto& declarable : GlobalContext::AllDeclarables()) {
Builtin* builtin = Builtin::DynamicCast(declarable.get());
if (!builtin || builtin->IsExternal()) continue;
- int firstParameterIndex = 1;
+ size_t firstParameterIndex = 1;
bool declareParameters = true;
if (builtin->IsStub()) {
new_contents_stream << "TFS(" << builtin->ExternalName();
@@ -2752,24 +2695,22 @@ void ImplementationVisitor::GenerateBuiltinDefinitions(
<< ", SharedFunctionInfo::kDontAdaptArgumentsSentinel";
declareParameters = false;
} else {
- assert(builtin->IsFixedArgsJavaScript());
+ DCHECK(builtin->IsFixedArgsJavaScript());
// FixedArg javascript builtins need to offer the parameter
// count.
- int size = static_cast<int>(builtin->parameter_names().size());
- assert(size >= 1);
- new_contents_stream << ", " << (std::max(size - 2, 0));
+ int parameter_count =
+ static_cast<int>(builtin->signature().ExplicitCount());
+ new_contents_stream << ", " << parameter_count;
// And the receiver is explicitly declared.
new_contents_stream << ", kReceiver";
- firstParameterIndex = 2;
+ firstParameterIndex = builtin->signature().implicit_count;
}
}
if (declareParameters) {
- int index = 0;
- for (const auto& parameter : builtin->parameter_names()) {
- if (index >= firstParameterIndex) {
- new_contents_stream << ", k" << CamelifyString(parameter->value);
- }
- index++;
+ for (size_t i = firstParameterIndex;
+ i < builtin->parameter_names().size(); ++i) {
+ Identifier* parameter = builtin->parameter_names()[i];
+ new_contents_stream << ", k" << CamelifyString(parameter->value);
}
}
new_contents_stream << ") \\\n";
@@ -2834,15 +2775,31 @@ class FieldOffsetsGenerator {
public:
explicit FieldOffsetsGenerator(const ClassType* type) : type_(type) {}
- virtual void WriteField(const Field& f) = 0;
+ virtual void WriteField(const Field& f, const std::string& size_string) = 0;
virtual void WriteMarker(const std::string& marker) = 0;
+ virtual void BeginPrivateOffsets() = 0;
virtual ~FieldOffsetsGenerator() { CHECK(is_finished_); }
void RecordOffsetFor(const Field& f) {
CHECK(!is_finished_);
UpdateSection(f);
- WriteField(f);
+ // We don't know statically how much space an indexed field takes, so report
+ // it as zero.
+ std::string size_string = "0";
+ if (!f.index.has_value()) {
+ size_t field_size;
+ std::tie(field_size, size_string) = f.GetFieldSizeInformation();
+ }
+ WriteField(f, size_string);
+
+ // Offsets for anything after an indexed field are likely to cause
+ // confusion, because the indexed field itself takes up a variable amount of
+  // space. We could avoid emitting them at all, but that might allow an
+  // inherited kSize to be accessible (and wrong), so we emit them as private.
+ if (f.index.has_value()) {
+ BeginPrivateOffsets();
+ }
}
void Finish() {
@@ -2923,17 +2880,16 @@ class MacroFieldOffsetsGenerator : public FieldOffsetsGenerator {
out_ << "TORQUE_GENERATED_" << CapifyStringWithUnderscores(type_->name())
<< "_FIELDS(V) \\\n";
}
- virtual void WriteField(const Field& f) {
- size_t field_size;
- std::string size_string;
- std::string machine_type;
- std::tie(field_size, size_string) = f.GetFieldSizeInformation();
+ void WriteField(const Field& f, const std::string& size_string) override {
out_ << "V(k" << CamelifyString(f.name_and_type.name) << "Offset, "
<< size_string << ") \\\n";
}
- virtual void WriteMarker(const std::string& marker) {
+ void WriteMarker(const std::string& marker) override {
out_ << "V(" << marker << ", 0) \\\n";
}
+ void BeginPrivateOffsets() override {
+ // Can't do anything meaningful here in the macro generator.
+ }
private:
std::ostream& out_;
@@ -3025,13 +2981,15 @@ void ImplementationVisitor::GenerateClassFieldOffsets(
// TODO(danno): Remove this once all classes use ClassFieldOffsetGenerator
// to generate field offsets without the use of macros.
- MacroFieldOffsetsGenerator g(header, type);
- for (auto f : type->fields()) {
- CurrentSourcePosition::Scope scope(f.pos);
- g.RecordOffsetFor(f);
+ if (!type->GenerateCppClassDefinitions()) {
+ MacroFieldOffsetsGenerator g(header, type);
+ for (auto f : type->fields()) {
+ CurrentSourcePosition::Scope scope(f.pos);
+ g.RecordOffsetFor(f);
+ }
+ g.Finish();
+ header << "\n";
}
- g.Finish();
- header << "\n";
}
}
const std::string output_header_path = output_directory + "/" + file_name;
@@ -3046,11 +3004,7 @@ class ClassFieldOffsetGenerator : public FieldOffsetsGenerator {
: FieldOffsetsGenerator(type),
hdr_(header),
previous_field_end_("P::kHeaderSize") {}
- virtual void WriteField(const Field& f) {
- size_t field_size;
- std::string size_string;
- std::string machine_type;
- std::tie(field_size, size_string) = f.GetFieldSizeInformation();
+ void WriteField(const Field& f, const std::string& size_string) override {
std::string field = "k" + CamelifyString(f.name_and_type.name) + "Offset";
std::string field_end = field + "End";
hdr_ << " static constexpr int " << field << " = " << previous_field_end_
@@ -3059,10 +3013,15 @@ class ClassFieldOffsetGenerator : public FieldOffsetsGenerator {
<< size_string << " - 1;\n";
previous_field_end_ = field_end + " + 1";
}
- virtual void WriteMarker(const std::string& marker) {
+ void WriteMarker(const std::string& marker) override {
hdr_ << " static constexpr int " << marker << " = " << previous_field_end_
<< ";\n";
}
+ void BeginPrivateOffsets() override {
+ // The following section must re-establish public mode (currently done by
+ // GenerateClassConstructors).
+ hdr_ << " private:\n";
+ }
private:
std::ostream& hdr_;
@@ -3109,8 +3068,6 @@ class CppClassGenerator {
};
void CppClassGenerator::GenerateClass() {
- hdr_ << "class " << name_ << ";\n\n";
-
hdr_ << template_decl() << "\n";
hdr_ << "class " << gen_name_ << " : public P {\n";
hdr_ << " static_assert(std::is_same<" << name_ << ", D>::value,\n"
@@ -3118,7 +3075,7 @@ void CppClassGenerator::GenerateClass() {
hdr_ << " static_assert(std::is_same<" << super_->name() << ", P>::value,\n"
<< " \"Pass in " << super_->name()
<< " as second template parameter for " << gen_name_ << ".\");\n";
- hdr_ << "public: \n";
+ hdr_ << " public: \n";
hdr_ << " using Super = P;\n";
for (const Field& f : type_->fields()) {
GenerateFieldAccessor(f);
@@ -3170,7 +3127,7 @@ void CppClassGenerator::GenerateClassCasts() {
}
void CppClassGenerator::GenerateClassConstructors() {
- hdr_ << "public:\n";
+ hdr_ << " public:\n";
hdr_ << " template <class DAlias = D>\n";
hdr_ << " constexpr " << gen_name_ << "() : P() {\n";
hdr_ << " static_assert(std::is_base_of<" << gen_name_ << ", \n";
@@ -3282,9 +3239,9 @@ void CppClassGenerator::GenerateFieldAccessorForObject(const Field& f) {
DCHECK(field_type->IsSubtypeOf(TypeOracle::GetObjectType()));
const std::string& name = f.name_and_type.name;
const std::string offset = "k" + CamelifyString(name) + "Offset";
- const ClassType* class_type = ClassType::DynamicCast(field_type);
+ base::Optional<const ClassType*> class_type = field_type->ClassSupertype();
- std::string type = class_type ? class_type->name() : "Object";
+ std::string type = class_type ? (*class_type)->name() : "Object";
// Generate declarations in header.
if (!class_type && field_type != TypeOracle::GetObjectType()) {
@@ -3356,20 +3313,42 @@ void ImplementationVisitor::GenerateClassDefinitions(
IncludeGuardScope inline_header_guard(inline_header, basename + "-inl.h");
inline_header << "#include \"torque-generated/class-definitions-tq.h\"\n\n";
- inline_header << "#include \"src/objects/objects-inl.h\"\n\n";
+ inline_header << "#include \"src/objects/js-promise.h\"\n";
+ inline_header << "#include \"src/objects/module.h\"\n";
+ inline_header << "#include \"src/objects/objects-inl.h\"\n";
+ inline_header << "#include \"src/objects/script.h\"\n\n";
IncludeObjectMacrosScope inline_header_macros(inline_header);
NamespaceScope inline_header_namespaces(inline_header, {"v8", "internal"});
implementation
<< "#include \"torque-generated/class-definitions-tq.h\"\n\n";
implementation << "#include \"torque-generated/class-verifiers-tq.h\"\n\n";
- implementation << "#include \"src/objects/struct-inl.h\"\n\n";
+ implementation << "#include \"src/objects/arguments-inl.h\"\n";
+ implementation << "#include \"src/objects/js-collection-inl.h\"\n";
+ implementation << "#include \"src/objects/embedder-data-array-inl.h\"\n";
+ implementation << "#include \"src/objects/js-generator-inl.h\"\n";
+ implementation << "#include \"src/objects/js-regexp-inl.h\"\n";
+ implementation
+ << "#include \"src/objects/js-regexp-string-iterator-inl.h\"\n";
+ implementation << "#include \"src/objects/literal-objects-inl.h\"\n";
+ implementation << "#include \"src/objects/microtask-inl.h\"\n";
+ implementation << "#include \"src/objects/module-inl.h\"\n";
+ implementation << "#include \"src/objects/promise-inl.h\"\n";
+ implementation << "#include \"src/objects/stack-frame-info-inl.h\"\n";
+ implementation << "#include \"src/objects/struct-inl.h\"\n";
+ implementation << "#include \"src/objects/template-objects-inl.h\"\n\n";
implementation
<< "#include "
"\"torque-generated/internal-class-definitions-tq-inl.h\"\n\n";
NamespaceScope implementation_namespaces(implementation,
{"v8", "internal"});
+ // Generate forward declarations for every class.
+ for (const TypeAlias* alias : GlobalContext::GetClasses()) {
+ const ClassType* type = ClassType::DynamicCast(alias->type());
+ header << "class " << type->name() << ";\n";
+ }
+
for (const TypeAlias* alias : GlobalContext::GetClasses()) {
const ClassType* type = ClassType::DynamicCast(alias->type());
if (type->GenerateCppClassDefinitions()) {
@@ -3724,6 +3703,16 @@ void ReportAllUnusedMacros() {
if (macro->IsTorqueMacro() && TorqueMacro::cast(macro)->IsExportedToCSA()) {
continue;
}
+  // TODO(gsps): Mark methods of generic structs as used if they are used in
+  // any instantiation
+ if (Method* method = Method::DynamicCast(macro)) {
+ if (StructType* struct_type =
+ StructType::DynamicCast(method->aggregate_type())) {
+ if (struct_type->GetSpecializedFrom().has_value()) {
+ continue;
+ }
+ }
+ }
std::vector<std::string> ignored_prefixes = {"Convert<", "Cast<",
"FromConstexpr<"};