diff --git a/DEPENDENCIES b/DEPENDENCIES index ba57700f..9ddbb1fd 100644 --- a/DEPENDENCIES +++ b/DEPENDENCIES @@ -1,4 +1,4 @@ vendorpull https://github.com/sourcemeta/vendorpull 70342aaf458e6cb80baeb5b718901075fc42ede6 noa https://github.com/sourcemeta/noa 653bda26413812241e503fd0b550a66f2df4700f googletest https://github.com/google/googletest 987e225614755fec7253aa95bf959c09e0d380d7 -webkit https://github.com/WebKit/WebKit 692ee545dbe93299189b974aa572c7fd4b6277bc +webkit https://github.com/WebKit/WebKit d477c762a9ecbbc8dedf3ca7a6a2a079577bf60c diff --git a/Makefile b/Makefile index db216480..906dcaf2 100644 --- a/Makefile +++ b/Makefile @@ -45,10 +45,8 @@ webkit: .always -DPORT="JSCOnly" \ -DENABLE_TOOLS:BOOL=OFF \ -DDEVELOPER_MODE:BOOL=OFF \ - -DENABLE_FTL_JIT:BOOL=ON \ -DENABLE_STATIC_JSC:BOOL=ON \ - -DUSE_SYSTEM_MALLOC:BOOL=ON \ - -DICU_DEBUG:BOOL=ON \ + -DUSE_THIN_ARCHIVES:BOOL=ON \ -DWEBKIT_LIBRARIES_DIR:STRING=D:/a/includejs/includejs/build/WebKitLibraries \ -DCMAKE_BUILD_TYPE:STRING=$(PRESET) $(CMAKE) --build ./build --config $(PRESET) --parallel 4 diff --git a/vendor/webkit.mask b/vendor/webkit.mask index 1cf17f51..a573e5a7 100644 --- a/vendor/webkit.mask +++ b/vendor/webkit.mask @@ -24,7 +24,6 @@ Source/PlatformWin.cmake Source/WebDriver Source/WebGPU Source/WebKitLegacy -Source/bmalloc Source/WebInspectorUI Source/WebKit Source/WebCore diff --git a/vendor/webkit/Source/CMakeLists.txt b/vendor/webkit/Source/CMakeLists.txt index c9729ba4..9d0a5286 100644 --- a/vendor/webkit/Source/CMakeLists.txt +++ b/vendor/webkit/Source/CMakeLists.txt @@ -30,6 +30,10 @@ if (USE_LIBWEBRTC) include_directories(${CMAKE_CURRENT_BINARY_DIR}/ThirdParty/libwebrtc) endif () +if (USE_SKIA) + add_subdirectory(ThirdParty/skia) +endif () + if (ENABLE_WEBINSPECTORUI) add_subdirectory(WebInspectorUI) endif () diff --git a/vendor/webkit/Source/JavaScriptCore/API/APICallbackFunction.h b/vendor/webkit/Source/JavaScriptCore/API/APICallbackFunction.h index 616924b9..bd3b99bd 100644 --- a/vendor/webkit/Source/JavaScriptCore/API/APICallbackFunction.h +++ b/vendor/webkit/Source/JavaScriptCore/API/APICallbackFunction.h @@ -49,10 +49,9 @@ EncodedJSValue APICallbackFunction::callImpl(JSGlobalObject* globalObject, CallF JSObjectRef thisObjRef = toRef(jsCast<JSObject*>(callFrame->thisValue().toThis(globalObject, ECMAMode::sloppy()))); int argumentCount = static_cast<int>(callFrame->argumentCount()); - Vector<JSValueRef> arguments; - arguments.reserveInitialCapacity(argumentCount); - for (int i = 0; i < argumentCount; i++) - arguments.uncheckedAppend(toRef(globalObject, callFrame->uncheckedArgument(i))); + Vector<JSValueRef> arguments(argumentCount, [&](size_t i) { + return toRef(globalObject, callFrame->uncheckedArgument(i)); + }); JSValueRef exception = nullptr; JSValueRef result; @@ -93,10 +92,9 @@ EncodedJSValue APICallbackFunction::constructImpl(JSGlobalObject* globalObject, } size_t argumentCount = callFrame->argumentCount(); - Vector<JSValueRef> arguments; - arguments.reserveInitialCapacity(argumentCount); - for (size_t i = 0; i < argumentCount; ++i) - arguments.uncheckedAppend(toRef(globalObject, callFrame->uncheckedArgument(i))); + Vector<JSValueRef> arguments(argumentCount, [&](size_t i) { + return toRef(globalObject, callFrame->uncheckedArgument(i)); + }); JSValueRef exception = nullptr; JSObjectRef result; diff --git a/vendor/webkit/Source/JavaScriptCore/API/JSCallbackObjectFunctions.h b/vendor/webkit/Source/JavaScriptCore/API/JSCallbackObjectFunctions.h index 3759f640..a8bc7813 100644 --- a/vendor/webkit/Source/JavaScriptCore/API/JSCallbackObjectFunctions.h +++ 
b/vendor/webkit/Source/JavaScriptCore/API/JSCallbackObjectFunctions.h @@ -478,10 +478,9 @@ EncodedJSValue JSCallbackObject<Parent>::constructImpl(JSGlobalObject* globalObj for (JSClassRef jsClass = jsCast<JSCallbackObject<Parent>*>(constructor)->classRef(); jsClass; jsClass = jsClass->parentClass) { if (JSObjectCallAsConstructorCallback callAsConstructor = jsClass->callAsConstructor) { size_t argumentCount = callFrame->argumentCount(); - Vector<JSValueRef> arguments; - arguments.reserveInitialCapacity(argumentCount); - for (size_t i = 0; i < argumentCount; ++i) - arguments.uncheckedAppend(toRef(globalObject, callFrame->uncheckedArgument(i))); + Vector<JSValueRef> arguments(argumentCount, [&](size_t i) { + return toRef(globalObject, callFrame->uncheckedArgument(i)); + }); JSValueRef exception = nullptr; JSObject* result; { @@ -556,10 +555,10 @@ EncodedJSValue JSCallbackObject<Parent>::callImpl(JSGlobalObject* globalObject, for (JSClassRef jsClass = jsCast<JSCallbackObject<Parent>*>(toJS(functionRef))->classRef(); jsClass; jsClass = jsClass->parentClass) { if (JSObjectCallAsFunctionCallback callAsFunction = jsClass->callAsFunction) { size_t argumentCount = callFrame->argumentCount(); - Vector<JSValueRef> arguments; - arguments.reserveInitialCapacity(argumentCount); - for (size_t i = 0; i < argumentCount; ++i) - arguments.uncheckedAppend(toRef(globalObject, callFrame->uncheckedArgument(i))); + Vector<JSValueRef> arguments(argumentCount, [&](size_t i) { + return toRef(globalObject, callFrame->uncheckedArgument(i)); + }); + JSValueRef exception = nullptr; JSValue result; { diff --git a/vendor/webkit/Source/JavaScriptCore/API/JSContext.mm b/vendor/webkit/Source/JavaScriptCore/API/JSContext.mm index 887974b8..e22e0ff4 100644 --- a/vendor/webkit/Source/JavaScriptCore/API/JSContext.mm +++ b/vendor/webkit/Source/JavaScriptCore/API/JSContext.mm @@ -37,6 +37,7 @@ #import "JSGlobalObject.h" #import "JSInternalPromise.h" #import "JSModuleLoader.h" +#import "JSRetainPtr.h" #import "JSScriptInternal.h" #import "JSValueInternal.h" #import "JSVirtualMachineInternal.h" @@ -261,11 +262,11 @@ - (JSVirtualMachine *)virtualMachine - (NSString *)name { - JSStringRef name = JSGlobalContextCopyName(m_context); + auto name = adopt(JSGlobalContextCopyName(m_context)); if (!name) return nil; - return adoptCF(JSStringCopyCFString(kCFAllocatorDefault, name)).bridgingAutorelease(); + return adoptCF(JSStringCopyCFString(kCFAllocatorDefault, name.get())).bridgingAutorelease(); } - (void)setName:(NSString *)name diff --git a/vendor/webkit/Source/JavaScriptCore/API/JSContextRef.cpp b/vendor/webkit/Source/JavaScriptCore/API/JSContextRef.cpp index a64de537..cadc1a77 100644 --- a/vendor/webkit/Source/JavaScriptCore/API/JSContextRef.cpp +++ b/vendor/webkit/Source/JavaScriptCore/API/JSContextRef.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006-2022 Apple Inc. All rights reserved. + * Copyright (C) 2006-2024 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -333,10 +333,8 @@ class BacktraceFunctor { builder.append('\n'); builder.append('#', visitor->index(), ' ', visitor->functionName(), "() at ", visitor->sourceURL()); if (visitor->hasLineAndColumnInfo()) { - unsigned lineNumber; - unsigned unusedColumn; - visitor->computeLineAndColumn(lineNumber, unusedColumn); - builder.append(':', lineNumber); + auto lineColumn = visitor->computeLineAndColumn(); + builder.append(':', lineColumn.line); } if (!visitor->callee().rawPtr()) diff --git a/vendor/webkit/Source/JavaScriptCore/API/JSContextRefPrivate.h b/vendor/webkit/Source/JavaScriptCore/API/JSContextRefPrivate.h index 8e98e0fe..1a8d9286 100644 --- a/vendor/webkit/Source/JavaScriptCore/API/JSContextRefPrivate.h +++ b/vendor/webkit/Source/JavaScriptCore/API/JSContextRefPrivate.h @@ -101,14 +101,14 @@ JS_EXPORT void JSContextGroupClearExecutionTimeLimit(JSContextGroupRef group) JS @result The value of the enablement, true if the sampling profiler gets enabled, otherwise false. @discussion Remote inspection is true by default. */ -JS_EXPORT bool JSContextGroupEnableSamplingProfiler(JSContextGroupRef group) JSC_API_AVAILABLE(macos(JSC_MAC_TBA), ios(JSC_IOS_TBA)); +JS_EXPORT bool JSContextGroupEnableSamplingProfiler(JSContextGroupRef group) JSC_API_AVAILABLE(macos(14.2), ios(17.2)); /*! @function @abstract Disables sampling profiler. @param group The JavaScript context group to stop sampling. */ -JS_EXPORT void JSContextGroupDisableSamplingProfiler(JSContextGroupRef group) JSC_API_AVAILABLE(macos(JSC_MAC_TBA), ios(JSC_IOS_TBA)); +JS_EXPORT void JSContextGroupDisableSamplingProfiler(JSContextGroupRef group) JSC_API_AVAILABLE(macos(14.2), ios(17.2)); /*! @function @@ -117,7 +117,7 @@ JS_EXPORT void JSContextGroupDisableSamplingProfiler(JSContextGroupRef group) JS @result The sampling profiler output in JSON form. NULL if sampling profiler is not enabled ever before. @discussion Calling this function clears the sampling data accumulated so far. */ -JS_EXPORT JSStringRef JSContextGroupTakeSamplesFromSamplingProfiler(JSContextGroupRef group) JSC_API_AVAILABLE(macos(JSC_MAC_TBA), ios(JSC_IOS_TBA)); +JS_EXPORT JSStringRef JSContextGroupTakeSamplesFromSamplingProfiler(JSContextGroupRef group) JSC_API_AVAILABLE(macos(14.2), ios(17.2)); /*! @function diff --git a/vendor/webkit/Source/JavaScriptCore/API/JSManagedValue.mm b/vendor/webkit/Source/JavaScriptCore/API/JSManagedValue.mm index 2c2c745a..bd48f046 100644 --- a/vendor/webkit/Source/JavaScriptCore/API/JSManagedValue.mm +++ b/vendor/webkit/Source/JavaScriptCore/API/JSManagedValue.mm @@ -145,6 +145,9 @@ - (void)didRemoveOwner:(id)owner - (JSValue *)value { + if (!m_lock) + return nil; + WTF::Locker locker(m_lock.get()); JSC::VM* vm = m_lock->vm(); if (!vm) diff --git a/vendor/webkit/Source/JavaScriptCore/API/JSObjectRef.cpp b/vendor/webkit/Source/JavaScriptCore/API/JSObjectRef.cpp index 914043d4..fc076851 100644 --- a/vendor/webkit/Source/JavaScriptCore/API/JSObjectRef.cpp +++ b/vendor/webkit/Source/JavaScriptCore/API/JSObjectRef.cpp @@ -142,6 +142,7 @@ JSObjectRef JSObjectMakeFunction(JSContextRef ctx, JSStringRef name, unsigned pa Identifier nameID = name ? 
name->identifier(&vm) : Identifier::fromString(vm, "anonymous"_s); MarkedArgumentBuffer args; + args.ensureCapacity(parameterCount + 1); for (unsigned i = 0; i < parameterCount; i++) args.append(jsString(vm, parameterNames[i]->string())); args.append(jsString(vm, body->string())); @@ -173,6 +174,7 @@ JSObjectRef JSObjectMakeArray(JSContextRef ctx, size_t argumentCount, const JSVa JSObject* result; if (argumentCount) { MarkedArgumentBuffer argList; + argList.ensureCapacity(argumentCount); for (size_t i = 0; i < argumentCount; ++i) argList.append(toJS(globalObject, arguments[i])); if (UNLIKELY(argList.hasOverflowed())) { @@ -204,6 +206,7 @@ JSObjectRef JSObjectMakeDate(JSContextRef ctx, size_t argumentCount, const JSVal auto scope = DECLARE_CATCH_SCOPE(vm); MarkedArgumentBuffer argList; + argList.ensureCapacity(argumentCount); for (size_t i = 0; i < argumentCount; ++i) argList.append(toJS(globalObject, arguments[i])); if (UNLIKELY(argList.hasOverflowed())) { @@ -254,6 +257,7 @@ JSObjectRef JSObjectMakeRegExp(JSContextRef ctx, size_t argumentCount, const JSV auto scope = DECLARE_CATCH_SCOPE(vm); MarkedArgumentBuffer argList; + argList.ensureCapacity(argumentCount); for (size_t i = 0; i < argumentCount; ++i) argList.append(toJS(globalObject, arguments[i])); if (UNLIKELY(argList.hasOverflowed())) { @@ -717,6 +721,7 @@ JSValueRef JSObjectCallAsFunction(JSContextRef ctx, JSObjectRef object, JSObject jsThisObject = globalObject->globalThis(); MarkedArgumentBuffer argList; + argList.ensureCapacity(argumentCount); for (size_t i = 0; i < argumentCount; i++) argList.append(toJS(globalObject, arguments[i])); if (UNLIKELY(argList.hasOverflowed())) { @@ -763,6 +768,7 @@ JSObjectRef JSObjectCallAsConstructor(JSContextRef ctx, JSObjectRef object, size return nullptr; MarkedArgumentBuffer argList; + argList.ensureCapacity(argumentCount); for (size_t i = 0; i < argumentCount; i++) argList.append(toJS(globalObject, arguments[i])); if (UNLIKELY(argList.hasOverflowed())) { @@ -809,10 +815,9 @@ JSPropertyNameArrayRef JSObjectCopyPropertyNames(JSContextRef ctx, JSObjectRef o PropertyNameArray array(vm, PropertyNameMode::Strings, PrivateSymbolMode::Exclude); jsObject->getPropertyNames(globalObject, array, DontEnumPropertiesMode::Exclude); - size_t size = array.size(); - propertyNames->array.reserveInitialCapacity(size); - for (size_t i = 0; i < size; ++i) - propertyNames->array.uncheckedAppend(OpaqueJSString::tryCreate(array[i].string()).releaseNonNull()); + propertyNames->array = WTF::map(array, [](auto& item) { + return OpaqueJSString::tryCreate(item.string()).releaseNonNull(); + }); return JSPropertyNameArrayRetain(propertyNames); } diff --git a/vendor/webkit/Source/JavaScriptCore/API/JSScriptRef.cpp b/vendor/webkit/Source/JavaScriptCore/API/JSScriptRef.cpp index b83df76c..a9bda4f3 100644 --- a/vendor/webkit/Source/JavaScriptCore/API/JSScriptRef.cpp +++ b/vendor/webkit/Source/JavaScriptCore/API/JSScriptRef.cpp @@ -74,7 +74,7 @@ static bool parseScript(VM& vm, const SourceCode& source, ParserError& error) { return !!JSC::parse( vm, source, Identifier(), ImplementationVisibility::Public, JSParserBuiltinMode::NotBuiltin, - JSParserStrictMode::NotStrict, JSParserScriptMode::Classic, SourceParseMode::ProgramMode, SuperBinding::NotNeeded, + JSParserStrictMode::NotStrict, JSParserScriptMode::Classic, SourceParseMode::ProgramMode, FunctionMode::None, SuperBinding::NotNeeded, error); } diff --git a/vendor/webkit/Source/JavaScriptCore/API/JSWrapperMap.h b/vendor/webkit/Source/JavaScriptCore/API/JSWrapperMap.h index 
6c18c64b..a5946a8f 100644 --- a/vendor/webkit/Source/JavaScriptCore/API/JSWrapperMap.h +++ b/vendor/webkit/Source/JavaScriptCore/API/JSWrapperMap.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,8 +23,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +#import "JSValueInternal.h" #import -#import #import #if JSC_OBJC_API_ENABLED diff --git a/vendor/webkit/Source/JavaScriptCore/API/MarkedJSValueRefArray.h b/vendor/webkit/Source/JavaScriptCore/API/MarkedJSValueRefArray.h index d28272a1..0d343239 100644 --- a/vendor/webkit/Source/JavaScriptCore/API/MarkedJSValueRefArray.h +++ b/vendor/webkit/Source/JavaScriptCore/API/MarkedJSValueRefArray.h @@ -58,7 +58,7 @@ class MarkedJSValueRefArray final : public BasicRawSentinelNode<MarkedJSValueRefArray> +#import "JSCallbackFunction.h" #if defined(__OBJC__) @class JSContext; diff --git a/vendor/webkit/Source/JavaScriptCore/API/glib/JSCClass.cpp b/vendor/webkit/Source/JavaScriptCore/API/glib/JSCClass.cpp index 98c10c86..e7545634 100644 --- a/vendor/webkit/Source/JavaScriptCore/API/glib/JSCClass.cpp +++ b/vendor/webkit/Source/JavaScriptCore/API/glib/JSCClass.cpp @@ -599,12 +599,9 @@ JSCValue* jsc_class_add_constructor(JSCClass* jscClass, const char* name, GCallb va_list args; va_start(args, paramCount); - Vector<GType> parameters; - if (paramCount) { - parameters.reserveInitialCapacity(paramCount); - for (unsigned i = 0; i < paramCount; ++i) - parameters.uncheckedAppend(va_arg(args, GType)); - } + Vector<GType> parameters(paramCount, [&](size_t) -> GType { + return va_arg(args, GType); + }); va_end(args); return jscClassCreateConstructor(jscClass, name ? name : priv->name.data(), callback, userData, destroyNotify, returnType, WTFMove(parameters)).leakRef(); @@ -647,12 +644,9 @@ JSCValue* jsc_class_add_constructorv(JSCClass* jscClass, const char* name, GCall if (!name) name = priv->name.data(); - Vector<GType> parameters; - if (parametersCount) { - parameters.reserveInitialCapacity(parametersCount); - for (unsigned i = 0; i < parametersCount; ++i) - parameters.uncheckedAppend(parameterTypes[i]); - } + Vector<GType> parameters(parametersCount, [&](size_t i) -> GType { + return parameterTypes[i]; + }); return jscClassCreateConstructor(jscClass, name ? 
name : priv->name.data(), callback, userData, destroyNotify, returnType, WTFMove(parameters)).leakRef(); } @@ -739,12 +733,9 @@ void jsc_class_add_method(JSCClass* jscClass, const char* name, GCallback callba va_list args; va_start(args, paramCount); - Vector<GType> parameters; - if (paramCount) { - parameters.reserveInitialCapacity(paramCount); - for (unsigned i = 0; i < paramCount; ++i) - parameters.uncheckedAppend(va_arg(args, GType)); - } + Vector<GType> parameters(paramCount, [&](size_t) -> GType { + return va_arg(args, GType); + }); va_end(args); jscClassAddMethod(jscClass, name, callback, userData, destroyNotify, returnType, WTFMove(parameters)); @@ -779,12 +770,9 @@ void jsc_class_add_methodv(JSCClass* jscClass, const char* name, GCallback callb g_return_if_fail(!parametersCount || parameterTypes); g_return_if_fail(jscClass->priv->context); - Vector<GType> parameters; - if (parametersCount) { - parameters.reserveInitialCapacity(parametersCount); - for (unsigned i = 0; i < parametersCount; ++i) - parameters.uncheckedAppend(parameterTypes[i]); - } + Vector<GType> parameters(parametersCount, [&](size_t i) -> GType { + return parameterTypes[i]; + }); jscClassAddMethod(jscClass, name, callback, userData, destroyNotify, returnType, WTFMove(parameters)); } diff --git a/vendor/webkit/Source/JavaScriptCore/API/glib/JSCContext.cpp b/vendor/webkit/Source/JavaScriptCore/API/glib/JSCContext.cpp index 104700d8..dd6f19b1 100644 --- a/vendor/webkit/Source/JavaScriptCore/API/glib/JSCContext.cpp +++ b/vendor/webkit/Source/JavaScriptCore/API/glib/JSCContext.cpp @@ -980,11 +980,11 @@ JSCCheckSyntaxResult jsc_context_check_syntax(JSCContext* context, const char* c switch (mode) { case JSC_CHECK_SYNTAX_MODE_SCRIPT: success = !!JSC::parse(vm, source, JSC::Identifier(), JSC::ImplementationVisibility::Public, JSC::JSParserBuiltinMode::NotBuiltin, - JSC::JSParserStrictMode::NotStrict, JSC::JSParserScriptMode::Classic, JSC::SourceParseMode::ProgramMode, JSC::SuperBinding::NotNeeded, error); + JSC::JSParserStrictMode::NotStrict, JSC::JSParserScriptMode::Classic, JSC::SourceParseMode::ProgramMode, JSC::FunctionMode::None, JSC::SuperBinding::NotNeeded, error); break; case JSC_CHECK_SYNTAX_MODE_MODULE: success = !!JSC::parse(vm, source, JSC::Identifier(), JSC::ImplementationVisibility::Public, JSC::JSParserBuiltinMode::NotBuiltin, - JSC::JSParserStrictMode::Strict, JSC::JSParserScriptMode::Module, JSC::SourceParseMode::ModuleAnalyzeMode, JSC::SuperBinding::NotNeeded, error); + JSC::JSParserStrictMode::Strict, JSC::JSParserScriptMode::Module, JSC::SourceParseMode::ModuleAnalyzeMode, JSC::FunctionMode::None, JSC::SuperBinding::NotNeeded, error); break; } diff --git a/vendor/webkit/Source/JavaScriptCore/API/glib/JSCValue.cpp b/vendor/webkit/Source/JavaScriptCore/API/glib/JSCValue.cpp index 75d1e8c2..3bdd9972 100644 --- a/vendor/webkit/Source/JavaScriptCore/API/glib/JSCValue.cpp +++ b/vendor/webkit/Source/JavaScriptCore/API/glib/JSCValue.cpp @@ -1001,12 +1001,9 @@ JSCValue* jsc_value_object_invoke_methodv(JSCValue* value, const char* name, uns if (jscContextHandleExceptionIfNeeded(priv->context.get(), exception)) return jsc_value_new_undefined(priv->context.get()); - Vector<JSValueRef> arguments; - if (parametersCount) { - arguments.reserveInitialCapacity(parametersCount); - for (unsigned i = 0; i < parametersCount; ++i) - arguments.uncheckedAppend(jscValueGetJSValue(parameters[i])); - } + Vector<JSValueRef> arguments(parametersCount, [&](size_t i) { + return jscValueGetJSValue(parameters[i]); + }); auto result = jsObjectCall(jsContext, function, 
JSC::JSCCallbackFunction::Type::Method, object, arguments, &exception); if (jscContextHandleExceptionIfNeeded(priv->context.get(), exception)) @@ -1206,12 +1203,9 @@ JSCValue* jsc_value_new_function(JSCContext* context, const char* name, GCallbac va_list args; va_start(args, paramCount); - Vector<GType> parameters; - if (paramCount) { - parameters.reserveInitialCapacity(paramCount); - for (unsigned i = 0; i < paramCount; ++i) - parameters.uncheckedAppend(va_arg(args, GType)); - } + Vector<GType> parameters(paramCount, [&](size_t) -> GType { + return va_arg(args, GType); + }); va_end(args); return jscValueFunctionCreate(context, name, callback, userData, destroyNotify, returnType, WTFMove(parameters)).leakRef(); @@ -1246,12 +1240,9 @@ JSCValue* jsc_value_new_functionv(JSCContext* context, const char* name, GCallba g_return_val_if_fail(callback, nullptr); g_return_val_if_fail(!parametersCount || parameterTypes, nullptr); - Vector<GType> parameters; - if (parametersCount) { - parameters.reserveInitialCapacity(parametersCount); - for (unsigned i = 0; i < parametersCount; ++i) - parameters.uncheckedAppend(parameterTypes[i]); - } + Vector<GType> parameters(parametersCount, [&](size_t i) -> GType { + return parameterTypes[i]; + }); return jscValueFunctionCreate(context, name, callback, userData, destroyNotify, returnType, WTFMove(parameters)).leakRef(); } @@ -1363,12 +1354,9 @@ JSCValue* jsc_value_function_callv(JSCValue* value, unsigned parametersCount, JS if (jscContextHandleExceptionIfNeeded(priv->context.get(), exception)) return jsc_value_new_undefined(priv->context.get()); - Vector<JSValueRef> arguments; - if (parametersCount) { - arguments.reserveInitialCapacity(parametersCount); - for (unsigned i = 0; i < parametersCount; ++i) - arguments.uncheckedAppend(jscValueGetJSValue(parameters[i])); - } + Vector<JSValueRef> arguments(parametersCount, [&](size_t i) { + return jscValueGetJSValue(parameters[i]); + }); auto result = jsObjectCall(jsContext, function, JSC::JSCCallbackFunction::Type::Function, nullptr, arguments, &exception); if (jscContextHandleExceptionIfNeeded(priv->context.get(), exception)) @@ -1449,12 +1437,9 @@ JSCValue* jsc_value_constructor_callv(JSCValue* value, unsigned parametersCount, if (jscContextHandleExceptionIfNeeded(priv->context.get(), exception)) return jsc_value_new_undefined(priv->context.get()); - Vector<JSValueRef> arguments; - if (parametersCount) { - arguments.reserveInitialCapacity(parametersCount); - for (unsigned i = 0; i < parametersCount; ++i) - arguments.uncheckedAppend(jscValueGetJSValue(parameters[i])); - } + Vector<JSValueRef> arguments(parametersCount, [&](size_t i) { + return jscValueGetJSValue(parameters[i]); + }); auto result = jsObjectCall(jsContext, function, JSC::JSCCallbackFunction::Type::Constructor, nullptr, arguments, &exception); if (jscContextHandleExceptionIfNeeded(priv->context.get(), exception)) diff --git a/vendor/webkit/Source/JavaScriptCore/API/tests/ExecutionTimeLimitTest.cpp b/vendor/webkit/Source/JavaScriptCore/API/tests/ExecutionTimeLimitTest.cpp index 23d8714e..8b6db53a 100644 --- a/vendor/webkit/Source/JavaScriptCore/API/tests/ExecutionTimeLimitTest.cpp +++ b/vendor/webkit/Source/JavaScriptCore/API/tests/ExecutionTimeLimitTest.cpp @@ -184,7 +184,7 @@ int testExecutionTimeLimit() timeLimit = 100_ms + tierAdjustment; JSContextGroupSetExecutionTimeLimit(contextGroup, timeLimit.seconds(), shouldTerminateCallback, nullptr); { -#if OS(LINUX) && (CPU(MIPS) || CPU(ARM_THUMB2)) +#if OS(LINUX) && CPU(ARM_THUMB2) Seconds timeAfterWatchdogShouldHaveFired = 500_ms + tierAdjustment; #else Seconds 
timeAfterWatchdogShouldHaveFired = 300_ms + tierAdjustment; diff --git a/vendor/webkit/Source/JavaScriptCore/API/tests/FunctionOverridesTest.cpp b/vendor/webkit/Source/JavaScriptCore/API/tests/FunctionOverridesTest.cpp index 32fe108c..2f740659 100644 --- a/vendor/webkit/Source/JavaScriptCore/API/tests/FunctionOverridesTest.cpp +++ b/vendor/webkit/Source/JavaScriptCore/API/tests/FunctionOverridesTest.cpp @@ -66,7 +66,7 @@ int testFunctionOverrides() "'function f1() { /* Overridden f1 */ }\\n" "function() { /* Overridden f2 */ }\\n" "function() { /* Overridden f3 */ }\\n" - "function anonymous() { /* Overridden f4 */ }\\n';" + "function anonymous(\\n) { /* Overridden f4 */ }\\n';" "var result = (str == expectedStr);" "\n" "result"; diff --git a/vendor/webkit/Source/JavaScriptCore/API/tests/testapi.c b/vendor/webkit/Source/JavaScriptCore/API/tests/testapi.c index 0bd35fec..a30c1f80 100644 --- a/vendor/webkit/Source/JavaScriptCore/API/tests/testapi.c +++ b/vendor/webkit/Source/JavaScriptCore/API/tests/testapi.c @@ -1846,7 +1846,7 @@ int main(int argc, char* argv[]) ASSERT(!JSObjectMakeFunction(context, NULL, 0, NULL, functionBody, NULL, 1, &exception)); ASSERT(JSValueIsObject(context, exception)); v = JSObjectGetProperty(context, JSValueToObject(context, exception, NULL), line, NULL); - assertEqualsAsNumber(v, 2); + assertEqualsAsNumber(v, 3); JSStringRelease(functionBody); JSStringRelease(line); @@ -1856,7 +1856,7 @@ int main(int argc, char* argv[]) ASSERT(!JSObjectMakeFunction(context, NULL, 0, NULL, functionBody, NULL, -42, &exception)); ASSERT(JSValueIsObject(context, exception)); v = JSObjectGetProperty(context, JSValueToObject(context, exception, NULL), line, NULL); - assertEqualsAsNumber(v, 2); + assertEqualsAsNumber(v, 3); JSStringRelease(functionBody); JSStringRelease(line); @@ -1866,7 +1866,7 @@ int main(int argc, char* argv[]) ASSERT(!JSObjectMakeFunction(context, NULL, 0, NULL, functionBody, NULL, 1, &exception)); ASSERT(JSValueIsObject(context, exception)); v = JSObjectGetProperty(context, JSValueToObject(context, exception, NULL), line, NULL); - assertEqualsAsNumber(v, 3); + assertEqualsAsNumber(v, 4); JSStringRelease(functionBody); JSStringRelease(line); @@ -1900,7 +1900,7 @@ int main(int argc, char* argv[]) JSStringRelease(functionBody); string = JSValueToStringCopy(context, function, NULL); - assertEqualsAsUTF8String(JSValueMakeString(context, string), "function foo(foo) {\nreturn foo;\n}"); + assertEqualsAsUTF8String(JSValueMakeString(context, string), "function foo(foo\n) {\nreturn foo;\n}"); JSStringRelease(string); JSStringRef print = JSStringCreateWithUTF8CString("print"); diff --git a/vendor/webkit/Source/JavaScriptCore/API/tests/testapi.cpp b/vendor/webkit/Source/JavaScriptCore/API/tests/testapi.cpp index 0e26d2dd..d5c439f6 100644 --- a/vendor/webkit/Source/JavaScriptCore/API/tests/testapi.cpp +++ b/vendor/webkit/Source/JavaScriptCore/API/tests/testapi.cpp @@ -28,6 +28,7 @@ #include "APICast.h" #include "JSGlobalObjectInlines.h" #include "MarkedJSValueRefArray.h" +#include "RegisterTZoneTypes.h" #include #include #include @@ -35,6 +36,7 @@ #include #include #include +#include #include #include diff --git a/vendor/webkit/Source/JavaScriptCore/CMakeLists.txt b/vendor/webkit/Source/JavaScriptCore/CMakeLists.txt index ca891ade..5fbb9a3b 100644 --- a/vendor/webkit/Source/JavaScriptCore/CMakeLists.txt +++ b/vendor/webkit/Source/JavaScriptCore/CMakeLists.txt @@ -25,6 +25,7 @@ set(JavaScriptCore_PRIVATE_INCLUDE_DIRECTORIES "${JAVASCRIPTCORE_DIR}/disassembler/zydis/Zydis" 
"${JAVASCRIPTCORE_DIR}/domjit" "${JAVASCRIPTCORE_DIR}/ftl" + "${JAVASCRIPTCORE_DIR}/fuzzilli" "${JAVASCRIPTCORE_DIR}/heap" "${JAVASCRIPTCORE_DIR}/debugger" "${JAVASCRIPTCORE_DIR}/inspector" @@ -242,7 +243,6 @@ set(OFFLINE_ASM offlineasm/cloop.rb offlineasm/config.rb offlineasm/instructions.rb - offlineasm/mips.rb offlineasm/offsets.rb offlineasm/opt.rb offlineasm/parser.rb @@ -289,7 +289,17 @@ list(APPEND JavaScriptCore_HEADERS ) if (WIN32) - set(OFFLINE_ASM_BACKEND "X86_WIN, X86_64_WIN, C_LOOP_WIN") + if (WTF_CPU_X86) + set(OFFLINE_ASM_BACKEND "X86_WIN") + elseif (WTF_CPU_X86_64) + set(OFFLINE_ASM_BACKEND "X86_64_WIN") + endif () + + if (NOT ENABLE_JIT) + if (ENABLE_C_LOOP) + set(OFFLINE_ASM_BACKEND "C_LOOP_WIN") + endif () + endif () else () if (WTF_CPU_X86) set(OFFLINE_ASM_BACKEND "X86") @@ -299,8 +309,6 @@ else () set(OFFLINE_ASM_BACKEND "ARM64") elseif (ARM_THUMB2_DETECTED) set(OFFLINE_ASM_BACKEND "ARMv7") - elseif (WTF_CPU_MIPS) - set(OFFLINE_ASM_BACKEND "MIPS") elseif (WTF_CPU_RISCV64) set(OFFLINE_ASM_BACKEND "RISCV64") endif () @@ -478,22 +486,8 @@ add_custom_command( # the .cpp files below is similar to the one in the previous comment. However, since these .cpp # files are used to build JavaScriptCore itself, we can just add LLIntAssembly.h to JSC_HEADERS # since it is used in the add_library() call at the end of this file. -if (MSVC AND NOT ENABLE_C_LOOP) - enable_language(ASM_MASM) - if (CMAKE_SIZEOF_VOID_P EQUAL 4) - # Win32 needs /safeseh with assembly, but Win64 does not. - set(MASM_EXECUTABLE ml) - set(LLINT_MASM_FLAGS /safeseh /c /Fo) - else () - set(MASM_EXECUTABLE ml64) - set(LLINT_MASM_FLAGS /c /Fo) - endif () - add_custom_command( - OUTPUT ${JavaScriptCore_DERIVED_SOURCES_DIR}/LowLevelInterpreterWin.obj - DEPENDS ${JavaScriptCore_DERIVED_SOURCES_DIR}/LowLevelInterpreterWin.asm - COMMAND ${MASM_EXECUTABLE} ${LLINT_MASM_FLAGS} ${JavaScriptCore_DERIVED_SOURCES_DIR}/LowLevelInterpreterWin.obj ${JavaScriptCore_DERIVED_SOURCES_DIR}/LowLevelInterpreterWin.asm - VERBATIM) - list(APPEND JavaScriptCore_SOURCES ${JavaScriptCore_DERIVED_SOURCES_DIR}/LowLevelInterpreterWin.obj) +if (MSVC AND WTF_CPU_X86_64 AND NOT ENABLE_C_LOOP) + list(APPEND JavaScriptCore_SOURCES ${JavaScriptCore_DERIVED_SOURCES_DIR}/LowLevelInterpreterWin.asm) add_library(LowLevelInterpreterLib OBJECT llint/LowLevelInterpreter.cpp) else () # As there's poor toolchain support for using `.file` directives in @@ -615,14 +609,11 @@ set(JavaScriptCore_PRIVATE_FRAMEWORK_HEADERS assembler/JITOperationList.h assembler/JITOperationValidation.h assembler/LinkBuffer.h - assembler/MIPSAssembler.h - assembler/MIPSRegisters.h assembler/MacroAssembler.h assembler/MacroAssemblerARM64.h assembler/MacroAssemblerARMv7.h assembler/MacroAssemblerCodeRef.h assembler/MacroAssemblerHelpers.h - assembler/MacroAssemblerMIPS.h assembler/MacroAssemblerRISCV64.h assembler/MacroAssemblerX86Common.h assembler/MacroAssemblerX86_64.h @@ -652,6 +643,7 @@ set(JavaScriptCore_PRIVATE_FRAMEWORK_HEADERS bytecode/BytecodeIntrinsicRegistry.h bytecode/CallEdge.h bytecode/CallLinkInfo.h + bytecode/CallLinkInfoBase.h bytecode/CallMode.h bytecode/CallVariant.h bytecode/CallVariantInlines.h @@ -670,7 +662,8 @@ set(JavaScriptCore_PRIVATE_FRAMEWORK_HEADERS bytecode/ExitKind.h bytecode/ExitingInlineKind.h bytecode/ExitingJITType.h - bytecode/ExpressionRangeInfo.h + bytecode/ExpressionInfo.h + bytecode/ExpressionInfoInlines.h bytecode/HandlerInfo.h bytecode/ICStatusMap.h bytecode/InlineCallFrame.h @@ -682,6 +675,7 @@ set(JavaScriptCore_PRIVATE_FRAMEWORK_HEADERS 
bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h bytecode/LazyOperandValueProfile.h bytecode/LazyValueProfile.h + bytecode/LineColumn.h bytecode/LinkTimeConstant.h bytecode/MetadataTable.h bytecode/ObjectAllocationProfile.h @@ -1075,6 +1069,7 @@ set(JavaScriptCore_PRIVATE_FRAMEWORK_HEADERS runtime/IndexingType.h runtime/InferredValue.h runtime/InitializeThreading.h + runtime/InlineAttribute.h runtime/Int16Array.h runtime/Int32Array.h runtime/Int8Array.h @@ -1226,6 +1221,7 @@ set(JavaScriptCore_PRIVATE_FRAMEWORK_HEADERS runtime/ScriptFetchParameters.h runtime/ScriptFetcher.h runtime/ShadowRealmObject.h + runtime/SideDataRepository.h runtime/SlowPathFunction.h runtime/SmallStrings.h runtime/SourceOrigin.h @@ -1297,6 +1293,7 @@ set(JavaScriptCore_PRIVATE_FRAMEWORK_HEADERS wasm/WasmCalleeGroup.h wasm/WasmCallsiteCollection.h wasm/WasmCapabilities.h + wasm/WasmCompilationContext.h wasm/WasmCompilationMode.h wasm/WasmContext.h wasm/WasmCreationMode.h @@ -1306,6 +1303,7 @@ set(JavaScriptCore_PRIVATE_FRAMEWORK_HEADERS wasm/WasmFunctionCodeBlockGenerator.h wasm/WasmFunctionIPIntMetadataGenerator.h wasm/WasmHandlerInfo.h + wasm/WasmIPIntTierUpCounter.h wasm/WasmIPIntGenerator.h wasm/WasmIndexOrName.h wasm/WasmJS.h diff --git a/vendor/webkit/Source/JavaScriptCore/Configurations/Base.xcconfig b/vendor/webkit/Source/JavaScriptCore/Configurations/Base.xcconfig index f419ffd9..68c280b9 100644 --- a/vendor/webkit/Source/JavaScriptCore/Configurations/Base.xcconfig +++ b/vendor/webkit/Source/JavaScriptCore/Configurations/Base.xcconfig @@ -27,6 +27,7 @@ ALWAYS_SEARCH_USER_PATHS = NO; CLANG_CXX_LANGUAGE_STANDARD = c++2a; CLANG_CXX_LIBRARY = libc++; +CLANG_ENABLE_EXPLICIT_MODULES = NO; CLANG_ENABLE_OBJC_WEAK = YES; CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; CLANG_WARN_BOOL_CONVERSION = YES; @@ -100,7 +101,7 @@ SYSTEM_HEADER_SEARCH_PATHS = $(WK_PRIVATE_SDK_DIR)$(WK_ALTERNATE_WEBKIT_SDK_PATH AD_HOC_CODE_SIGNING_ALLOWED = YES; CODE_SIGN_IDENTITY = -; -SUPPORTED_PLATFORMS = iphoneos iphonesimulator macosx appletvos appletvsimulator watchos watchsimulator; +SUPPORTED_PLATFORMS = iphoneos iphonesimulator macosx appletvos appletvsimulator watchos watchsimulator xros xrsimulator; SUPPORTS_MACCATALYST = YES; FRAMEWORK_SEARCH_PATHS = $(WK_QUOTED_OVERRIDE_FRAMEWORKS_DIR); @@ -155,3 +156,30 @@ WK_DEFAULT_LTO_MODE = $(WK_USER_LTO_MODE_thin); WK_PROCESSED_XCENT_FILE=$(TEMP_FILE_DIR)/$(FULL_PRODUCT_NAME).entitlements WK_USE_RESTRICTED_ENTITLEMENTS = $(USE_INTERNAL_SDK); + +// Shared variables used for dynamic or static linking of JavaScriptCore and jsc. + +// Tomorrow me will be smart enough to figure out how to do this properly. 
+JSC_SEC_LD_FLAGS[sdk=iphoneos*] = -weak_framework BrowserEngineCore; +JSC_SEC_LD_FLAGS[sdk=iphoneos17.0*] = ; +JSC_SEC_LD_FLAGS[sdk=iphoneos17.1*] = ; +JSC_SEC_LD_FLAGS[sdk=iphoneos17.2*] = ; +JSC_SEC_LD_FLAGS[sdk=iphoneos17.3*] = ; +JSC_SEC_LD_FLAGS[sdk=iphoneos18*] = ; +JSC_SEC_LD_FLAGS[sdk=appletv*] = ; +JSC_SEC_LD_FLAGS[sdk=watch*] = ; +JSC_SEC_LD_FLAGS[sdk=xr*] = ; + +OTHER_LDFLAGS_JAVASCRIPTCORE_DEPS = $(JSC_SEC_LD_FLAGS) -fobjc-link-runtime -licucore -framework Security; + +WTF_ARCHIVE = $(BUILT_PRODUCTS_DIR)/libWTF.a; +WTF_ARCHIVE[config=Production] = $(SDK_DIR)$(WK_ALTERNATE_WEBKIT_SDK_PATH)$(WK_LIBRARY_INSTALL_PATH)/libWTF.a; + +BMALLOC_ARCHIVE = $(BUILT_PRODUCTS_DIR)/libbmalloc.a; +BMALLOC_ARCHIVE[config=Production] = $(SDK_DIR)$(WK_ALTERNATE_WEBKIT_SDK_PATH)$(WK_LIBRARY_INSTALL_PATH)/libbmalloc.a; + +LIBPAS_ARCHIVE = $(BUILT_PRODUCTS_DIR)/libpas.a; +LIBPAS_ARCHIVE[config=Production] = $(SDK_DIR)$(WK_ALTERNATE_WEBKIT_SDK_PATH)$(WK_LIBRARY_INSTALL_PATH)/libpas.a; + +LIBJAVASCRIPTCORE_ARCHIVE = $(BUILT_PRODUCTS_DIR)/libJavaScriptCore.a; diff --git a/vendor/webkit/Source/JavaScriptCore/Configurations/BaseTarget.xcconfig b/vendor/webkit/Source/JavaScriptCore/Configurations/BaseTarget.xcconfig new file mode 100644 index 00000000..e768ce26 --- /dev/null +++ b/vendor/webkit/Source/JavaScriptCore/Configurations/BaseTarget.xcconfig @@ -0,0 +1,34 @@ +// Copyright (C) 2009-2023 Apple Inc. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#include "../../../Configurations/Version.xcconfig" + +GCC_PREFIX_HEADER = JavaScriptCorePrefix.h; +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) $(ENABLE_LLVM_PROFILE_GENERATION) PAS_BMALLOC_HIDDEN; +GCC_SYMBOLS_PRIVATE_EXTERN = YES; +OTHER_CFLAGS = $(inherited) -fno-slp-vectorize --system-header-prefix=unicode/ -D__STDC_WANT_LIB_EXT1__=1; +HEADER_SEARCH_PATHS = "${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore" $(HEADER_SEARCH_PATHS); +PRODUCT_NAME = JavaScriptCore; + +CLANG_INSTRUMENT_FOR_OPTIMIZATION_PROFILING = $(CLANG_INSTRUMENT_FOR_OPTIMIZATION_PROFILING_$(ENABLE_LLVM_PROFILE_GENERATION)); +CLANG_INSTRUMENT_FOR_OPTIMIZATION_PROFILING_ENABLE_LLVM_PROFILE_GENERATION = YES; diff --git a/vendor/webkit/Source/JavaScriptCore/Configurations/DebugRelease.xcconfig b/vendor/webkit/Source/JavaScriptCore/Configurations/DebugRelease.xcconfig index ae8259ce..c311fefe 100644 --- a/vendor/webkit/Source/JavaScriptCore/Configurations/DebugRelease.xcconfig +++ b/vendor/webkit/Source/JavaScriptCore/Configurations/DebugRelease.xcconfig @@ -34,6 +34,9 @@ CODE_SIGN_IDENTITY_ = $(CODE_SIGN_IDENTITY_NO); CODE_SIGN_IDENTITY_NO = -; CODE_SIGN_IDENTITY_YES = $(WK_ENGINEERING_CODE_SIGN_IDENTITY); +CODE_SIGN_IDENTITY[sdk=embedded] = $(CODE_SIGN_IDENTITY_EMBEDDED_$(USE_INTERNAL_SDK)); +CODE_SIGN_IDENTITY_EMBEDDED_YES = -; + SDKROOT = $(SDKROOT_$(USE_INTERNAL_SDK)); SDKROOT_ = macosx; SDKROOT_YES = macosx.internal; diff --git a/vendor/webkit/Source/JavaScriptCore/Configurations/JSC.xcconfig b/vendor/webkit/Source/JavaScriptCore/Configurations/JSC.xcconfig index 0415371e..478462d3 100644 --- a/vendor/webkit/Source/JavaScriptCore/Configurations/JSC.xcconfig +++ b/vendor/webkit/Source/JavaScriptCore/Configurations/JSC.xcconfig @@ -1,4 +1,4 @@ -// Copyright (C) 2011-2022 Apple Inc. All rights reserved. +// Copyright (C) 2011-2023 Apple Inc. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions @@ -25,7 +25,11 @@ INSTALL_PATH = $(INSTALL_PATH_PREFIX)$(JAVASCRIPTCORE_FRAMEWORKS_DIR)/$(JAVASCRIPTCORE_HELPERS_DIR); -OTHER_LDFLAGS = $(inherited) $(WK_RELOCATABLE_FRAMEWORKS_LDFLAGS) $(SOURCE_VERSION_LDFLAGS); +JSC_USE_STATIC_LINKING = $(WK_NOT_$(WK_NOT_$(ENABLE_FUZZILLI))); + +OTHER_LDFLAGS = $(inherited) $(OTHER_LDFLAGS_STATIC_LINKING_$(JSC_USE_STATIC_LINKING)); +OTHER_LDFLAGS_STATIC_LINKING_YES = $(OTHER_LDFLAGS_JAVASCRIPTCORE_DEPS) $(WTF_ARCHIVE) $(BMALLOC_ARCHIVE) $(LIBPAS_ARCHIVE) $(LIBJAVASCRIPTCORE_ARCHIVE); +OTHER_LDFLAGS_STATIC_LINKING_NO = -framework JavaScriptCore $(WK_RELOCATABLE_FRAMEWORKS_LDFLAGS) $(SOURCE_VERSION_LDFLAGS); WK_RELOCATABLE_FRAMEWORKS_LDFLAGS = $(WK_RELOCATABLE_FRAMEWORKS_LDFLAGS_$(WK_RELOCATABLE_FRAMEWORKS)); WK_RELOCATABLE_FRAMEWORKS_LDFLAGS_YES = -Wl,-dyld_env,DYLD_FRAMEWORK_PATH=@executable_path/../../../..; diff --git a/vendor/webkit/Source/JavaScriptCore/Configurations/JavaScriptCore.xcconfig b/vendor/webkit/Source/JavaScriptCore/Configurations/JavaScriptCore.xcconfig index 6bef4a72..6e9c2f39 100644 --- a/vendor/webkit/Source/JavaScriptCore/Configurations/JavaScriptCore.xcconfig +++ b/vendor/webkit/Source/JavaScriptCore/Configurations/JavaScriptCore.xcconfig @@ -19,9 +19,9 @@ // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include "../../../Configurations/Version.xcconfig" +#include "BaseTarget.xcconfig" DEFINES_MODULE = YES; MODULEMAP_FILE = $(SRCROOT)/JavaScriptCore.modulemap; @@ -46,59 +46,33 @@ TAPI_USE_SRCROOT = $(TAPI_USE_SRCROOT$(WK_XCODE_15)); TAPI_USE_SRCROOT_XCODE_SINCE_15 = YES; TAPI_ENABLE_PROJECT_HEADERS = YES; TAPI_VERIFY_MODE = Pedantic; -// C++ dialect flags (-std, -fvisibility, -fno-rtti) are needed because JavaScriptCore / WTF private headers are only safe to use from other WebKit projects, which build with the same dialect. +// C++ dialect flags (-fvisibility, others inherited from CommonBase.xcconfig) +// are needed because JavaScriptCore / WTF private headers are only safe to use +// from other WebKit projects, which build with the same dialect. OTHER_TAPI_FLAGS = $(inherited) -fvisibility=hidden -exclude-private-header **/*SPI.h -extra-project-header $(SRCROOT)/API/ExtraSymbolsForTAPI.h $(OTHER_TAPI_FLAGS_STATICLIBS_$(DEPLOYMENT_LOCATION)); // JavaScriptCore exports libWTF.a and libbmalloc.a, so tapi needs to know about headers from those projects. The extra filelists are generated by their respective projects. In install-style builds, there is a postprocessing step done as a JavaScriptCore build phase. OTHER_TAPI_FLAGS_STATICLIBS_NO = -filelist $(BUILT_PRODUCTS_DIR)/usr/local/include/bmalloc/bmalloc.json -filelist $(BUILT_PRODUCTS_DIR)/usr/local/include/wtf/WTF.json OTHER_TAPI_FLAGS_STATICLIBS_YES = -filelist $(DERIVED_FILE_DIR)/bmalloc.json -filelist $(DERIVED_FILE_DIR)/WTF.json WK_ERROR_WHEN_LINKING_WITH_STATIC_INITIALIZERS = -Xlinker -no_inits; -WK_NO_STATIC_INITIALIZERS = $(WK_NO_STATIC_INITIALIZERS_$(CONFIGURATION)_$(WK_ANY_SANITIZER_ENABLED)_$(ENABLE_LLVM_PROFILE_GENERATION)); +WK_NO_STATIC_INITIALIZERS = $(WK_NO_STATIC_INITIALIZERS_$(CONFIGURATION)_$(WK_OR_$(WK_ANY_SANITIZER_ENABLED)_$(ENABLE_FUZZILLI))_$(ENABLE_LLVM_PROFILE_GENERATION)); WK_NO_STATIC_INITIALIZERS_Release__ = $(WK_ERROR_WHEN_LINKING_WITH_STATIC_INITIALIZERS); WK_NO_STATIC_INITIALIZERS_Release_NO_ = $(WK_ERROR_WHEN_LINKING_WITH_STATIC_INITIALIZERS); WK_NO_STATIC_INITIALIZERS_Production__ = $(WK_ERROR_WHEN_LINKING_WITH_STATIC_INITIALIZERS); WK_NO_STATIC_INITIALIZERS_Production_NO_ = $(WK_ERROR_WHEN_LINKING_WITH_STATIC_INITIALIZERS); -OTHER_LDFLAGS_BASE = -unexported_symbols_list $(SRCROOT)/unexported-libc++.txt -force_load $(WTF_ARCHIVE) -force_load $(BMALLOC_ARCHIVE) -load_hidden $(LIBPAS_ARCHIVE) $(SOURCE_VERSION_LDFLAGS) $(WK_NO_STATIC_INITIALIZERS); +OTHER_LDFLAGS_BASE = $(OTHER_LDFLAGS_JAVASCRIPTCORE_DEPS) -unexported_symbols_list $(SRCROOT)/unexported-libc++.txt -force_load $(WTF_ARCHIVE) -force_load $(BMALLOC_ARCHIVE) -force_load $(LIBJAVASCRIPTCORE_ARCHIVE) -load_hidden $(LIBPAS_ARCHIVE) $(SOURCE_VERSION_LDFLAGS) $(WK_NO_STATIC_INITIALIZERS); OTHER_LDFLAGS[sdk=embedded*] = $(inherited) $(OTHER_LDFLAGS_BASE); OTHER_LDFLAGS[sdk=macosx*] = $(inherited) $(OTHER_LDFLAGS_BASE) -framework CoreServices $(PROFILE_GENERATE_OR_USE_LDFLAGS); -WTF_ARCHIVE = $(BUILT_PRODUCTS_DIR)/libWTF.a; -WTF_ARCHIVE[config=Production] = $(SDK_DIR)$(WK_ALTERNATE_WEBKIT_SDK_PATH)$(WK_LIBRARY_INSTALL_PATH)/libWTF.a; - -BMALLOC_ARCHIVE = $(BUILT_PRODUCTS_DIR)/libbmalloc.a; -BMALLOC_ARCHIVE[config=Production] = $(SDK_DIR)$(WK_ALTERNATE_WEBKIT_SDK_PATH)$(WK_LIBRARY_INSTALL_PATH)/libbmalloc.a; - -LIBPAS_ARCHIVE = $(BUILT_PRODUCTS_DIR)/libpas.a; -LIBPAS_ARCHIVE[config=Production] = $(SDK_DIR)$(WK_ALTERNATE_WEBKIT_SDK_PATH)$(WK_LIBRARY_INSTALL_PATH)/libpas.a; - 
SECTORDER_FLAGS = $(SECTORDER_FLAGS_$(CONFIGURATION)); SECTORDER_FLAGS_Production[sdk=macosx*] = -Wl,-order_file,JavaScriptCore.order; -CLANG_OPTIMIZATION_PROFILE_FILE = $(BUILT_PRODUCTS_DIR)/DerivedSources/JavaScriptCore/JavaScriptCore.profdata; -WK_COMPRESSED_OPTIMIZATION_PROFILE_FILE = $(WK_COMPRESSED_OPTIMIZATION_PROFILE_FILE_$(USE_INTERNAL_SDK)); -WK_COMPRESSED_OPTIMIZATION_PROFILE_FILE_YES = $(WK_WEBKITADDITIONS_HEADERS_FOLDER_PATH)/Profiling/JavaScriptCore.profdata.compressed; - -CLANG_USE_OPTIMIZATION_PROFILE = $(CLANG_USE_OPTIMIZATION_PROFILE_$(USE_INTERNAL_SDK)_$(CONFIGURATION)_$(WK_PLATFORM_NAME)); -CLANG_USE_OPTIMIZATION_PROFILE_YES_Release_macosx = YES; -CLANG_USE_OPTIMIZATION_PROFILE_YES_Release_iphoneos = YES; -CLANG_USE_OPTIMIZATION_PROFILE_YES_Production_macosx = YES; -CLANG_USE_OPTIMIZATION_PROFILE_YES_Production_iphoneos = YES; - -CLANG_INSTRUMENT_FOR_OPTIMIZATION_PROFILING = $(CLANG_INSTRUMENT_FOR_OPTIMIZATION_PROFILING_$(ENABLE_LLVM_PROFILE_GENERATION)); -CLANG_INSTRUMENT_FOR_OPTIMIZATION_PROFILING_ENABLE_LLVM_PROFILE_GENERATION = YES; - -GCC_PREFIX_HEADER = JavaScriptCorePrefix.h; -GCC_PREPROCESSOR_DEFINITIONS = $(inherited) $(ENABLE_LLVM_PROFILE_GENERATION) PAS_BMALLOC_HIDDEN; -GCC_SYMBOLS_PRIVATE_EXTERN = YES; -OTHER_CFLAGS = $(inherited) -fno-slp-vectorize --system-header-prefix=unicode/ -D__STDC_WANT_LIB_EXT1__=1; -HEADER_SEARCH_PATHS = "${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore" $(HEADER_SEARCH_PATHS); INFOPLIST_FILE = Info.plist; INSTALL_PATH = $(INSTALL_PATH_PREFIX)$(JAVASCRIPTCORE_FRAMEWORKS_DIR); DYLIB_INSTALL_NAME_BASE = $(DYLIB_INSTALL_NAME_BASE_$(WK_USE_ALTERNATE_FRAMEWORKS_DIR)); DYLIB_INSTALL_NAME_BASE_NO = $(NORMAL_JAVASCRIPTCORE_FRAMEWORKS_DIR) DYLIB_INSTALL_NAME_BASE_YES = $(JAVASCRIPTCORE_FRAMEWORKS_DIR); -PRODUCT_NAME = JavaScriptCore; PRODUCT_BUNDLE_IDENTIFIER = com.apple.$(PRODUCT_NAME:rfc1034identifier); ALTERNATE_ROOT_PATH = $(JAVASCRIPTCORE_FRAMEWORKS_DIR); @@ -111,6 +85,3 @@ APPLY_RULES_IN_COPY_HEADERS = YES; JSC_FRAMEWORK_HEADER_POSTPROCESSING_DISABLED = YES; JSC_FRAMEWORK_HEADER_POSTPROCESSING_DISABLED[config=Production] = $(WK_USE_OVERRIDE_FRAMEWORKS_DIR); - -EXCLUDED_SOURCE_FILE_NAMES = $(inherited); -EXCLUDED_SOURCE_FILE_NAMES[sdk=iphone*] = $(inherited) framework.sb; diff --git a/vendor/webkit/Source/JavaScriptCore/Configurations/libJavaScriptCore.xcconfig b/vendor/webkit/Source/JavaScriptCore/Configurations/libJavaScriptCore.xcconfig new file mode 100644 index 00000000..359febd9 --- /dev/null +++ b/vendor/webkit/Source/JavaScriptCore/Configurations/libJavaScriptCore.xcconfig @@ -0,0 +1,41 @@ +// Copyright (C) 2009-2023 Apple Inc. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "BaseTarget.xcconfig" + +CLANG_OPTIMIZATION_PROFILE_FILE = $(BUILT_PRODUCTS_DIR)/DerivedSources/JavaScriptCore/JavaScriptCore.profdata; +WK_COMPRESSED_OPTIMIZATION_PROFILE_FILE = $(WK_COMPRESSED_OPTIMIZATION_PROFILE_FILE_$(USE_INTERNAL_SDK)); +WK_COMPRESSED_OPTIMIZATION_PROFILE_FILE_YES = $(WK_WEBKITADDITIONS_HEADERS_FOLDER_PATH)/Profiling/JavaScriptCore.profdata.compressed; + +CLANG_USE_OPTIMIZATION_PROFILE = $(CLANG_USE_OPTIMIZATION_PROFILE_$(USE_INTERNAL_SDK)_$(CONFIGURATION)_$(WK_PLATFORM_NAME)); +CLANG_USE_OPTIMIZATION_PROFILE_YES_Release_macosx = YES; +CLANG_USE_OPTIMIZATION_PROFILE_YES_Release_iphoneos = YES; +CLANG_USE_OPTIMIZATION_PROFILE_YES_Production_macosx = YES; +CLANG_USE_OPTIMIZATION_PROFILE_YES_Production_iphoneos = YES; + +SKIP_INSTALL = YES; + +STRIP_INSTALLED_PRODUCT = NO; + +EXCLUDED_SOURCE_FILE_NAMES = $(inherited); +EXCLUDED_SOURCE_FILE_NAMES[sdk=iphone*] = $(inherited) framework.sb; diff --git a/vendor/webkit/Source/JavaScriptCore/Info.plist b/vendor/webkit/Source/JavaScriptCore/Info.plist index 90d2ee7a..8c8ca859 100644 --- a/vendor/webkit/Source/JavaScriptCore/Info.plist +++ b/vendor/webkit/Source/JavaScriptCore/Info.plist @@ -7,7 +7,7 @@ CFBundleExecutable ${PRODUCT_NAME} CFBundleGetInfoString - ${BUNDLE_VERSION}, Copyright 2003-2023 Apple Inc.; Copyright 1999-2001 Harri Porten <porten@kde.org>; Copyright 2001 Peter Kelly <pmk@post.com>; Copyright 1997-2005 University of Cambridge; Copyright 1991, 2000, 2001 by Lucent Technologies. + ${BUNDLE_VERSION}, Copyright 2003-2024 Apple Inc.; Copyright 1999-2001 Harri Porten <porten@kde.org>; Copyright 2001 Peter Kelly <pmk@post.com>; Copyright 1997-2005 University of Cambridge; Copyright 1991, 2000, 2001 by Lucent Technologies. 
CFBundleIdentifier ${PRODUCT_BUNDLE_IDENTIFIER} CFBundleInfoDictionaryVersion diff --git a/vendor/webkit/Source/JavaScriptCore/JavaScriptCore.order b/vendor/webkit/Source/JavaScriptCore/JavaScriptCore.order index 261be5f7..b4232283 100644 --- a/vendor/webkit/Source/JavaScriptCore/JavaScriptCore.order +++ b/vendor/webkit/Source/JavaScriptCore/JavaScriptCore.order @@ -81,7 +81,6 @@ __ZN3WTFL16threadEntryPointEPv __ZN3WTF31initializeCurrentThreadInternalEPKc __ZN3WTF20ThreadIdentifierData10initializeEj __ZN3WTF5MutexD1Ev -__ZN3WTF6String29charactersWithNullTerminationEv __ZN3WTF10StringImpl34createWithTerminatingNullCharacterERKS0_ __ZNK3WTF10StringImpl17getData16SlowCaseEv __ZN3WTF15AutodrainedPoolC1Ev diff --git a/vendor/webkit/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj b/vendor/webkit/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj index 6a282609..62628f1a 100644 --- a/vendor/webkit/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj +++ b/vendor/webkit/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj @@ -107,7 +107,6 @@ 0F0B839D14BCF46600885B4F /* LLIntThunks.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B839814BCF45A00885B4F /* LLIntThunks.h */; settings = {ATTRIBUTES = (Private, ); }; }; 0F0B83A714BCF50700885B4F /* CodeType.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B83A514BCF50400885B4F /* CodeType.h */; settings = {ATTRIBUTES = (Private, ); }; }; 0F0B83A914BCF56200885B4F /* HandlerInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B83A814BCF55E00885B4F /* HandlerInfo.h */; settings = {ATTRIBUTES = (Private, ); }; }; - 0F0B83AB14BCF5BB00885B4F /* ExpressionRangeInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B83AA14BCF5B900885B4F /* ExpressionRangeInfo.h */; settings = {ATTRIBUTES = (Private, ); }; }; 0F0B83B114BCF71800885B4F /* CallLinkInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B83AF14BCF71400885B4F /* CallLinkInfo.h */; settings = {ATTRIBUTES = (Private, ); }; }; 0F0CAEFC1EC4DA6B00970D12 /* JSHeapFinalizerPrivate.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0CAEFA1EC4DA6200970D12 /* JSHeapFinalizerPrivate.h */; settings = {ATTRIBUTES = (Private, ); }; }; 0F0CAEFF1EC4DA8800970D12 /* HeapFinalizerCallback.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0CAEFE1EC4DA8500970D12 /* HeapFinalizerCallback.h */; settings = {ATTRIBUTES = (Private, ); }; }; @@ -938,7 +937,12 @@ 43AB26C61C1A535900D82AE6 /* B3MathExtras.h in Headers */ = {isa = PBXBuildFile; fileRef = 43AB26C51C1A52F700D82AE6 /* B3MathExtras.h */; }; 43C392AB1C3BEB0500241F53 /* AssemblerCommon.h in Headers */ = {isa = PBXBuildFile; fileRef = 43C392AA1C3BEB0000241F53 /* AssemblerCommon.h */; settings = {ATTRIBUTES = (Private, ); }; }; 4443AE3316E188D90076F110 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 51F0EB6105C86C6B00E6DF1B /* Foundation.framework */; }; + 4469970B2AFEF6CF008B930C /* JavaScriptCore.framework in Product Dependencies */ = {isa = PBXBuildFile; fileRef = 932F5BD90822A1C700736975 /* JavaScriptCore.framework */; }; + 4487DB832AF825C800AFECAE /* Fuzzilli.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4487DB812AF825C700AFECAE /* Fuzzilli.cpp */; }; + 44F93E0E2AE71FBD00FFA37C /* JavaScriptCoreFramework.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 44F93E0D2AE71F9F00FFA37C /* JavaScriptCoreFramework.cpp */; }; 451539B912DC994500EF7AC4 /* Yarr.h in Headers */ = {isa = PBXBuildFile; fileRef = 451539B812DC994500EF7AC4 /* Yarr.h */; settings = {ATTRIBUTES = (Private, ); }; }; 
+ 4615E46A2B5849F4001D4D53 /* WasmBBQJIT32_64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4615E4672B5833FB001D4D53 /* WasmBBQJIT32_64.cpp */; }; + 4615E46B2B5849F4001D4D53 /* WasmBBQJIT64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4615E4682B5833FB001D4D53 /* WasmBBQJIT64.cpp */; }; 473DA4A4764C45FE871B0485 /* DefinePropertyAttributes.h in Headers */ = {isa = PBXBuildFile; fileRef = 169948EDE68D4054B01EF797 /* DefinePropertyAttributes.h */; settings = {ATTRIBUTES = (Private, ); }; }; 4B1F22F62900BFC700CB5E66 /* Width.h in Headers */ = {isa = PBXBuildFile; fileRef = 4BBA4CD428FF5FE5003EBFC4 /* Width.h */; settings = {ATTRIBUTES = (Private, ); }; }; 4B46940328984FA800512FDF /* MacroAssemblerARM64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = FEB137561BB11EEE00CD5100 /* MacroAssemblerARM64.cpp */; }; @@ -979,6 +983,7 @@ 52EED7942492B870008F4C93 /* FunctionAllowlist.h in Headers */ = {isa = PBXBuildFile; fileRef = 52EED7932492B868008F4C93 /* FunctionAllowlist.h */; }; 52F6C35E1E71EB080081F4CC /* WebAssemblyWrapperFunction.h in Headers */ = {isa = PBXBuildFile; fileRef = 52F6C35C1E71EB080081F4CC /* WebAssemblyWrapperFunction.h */; }; 52FDABC32788076B00C15B59 /* WasmIRGeneratorHelpers.h in Headers */ = {isa = PBXBuildFile; fileRef = 52FDABC22788076900C15B59 /* WasmIRGeneratorHelpers.h */; }; + 53091F9A2ABE1F570076CBC4 /* WasmIPIntSlowPaths.h in Headers */ = {isa = PBXBuildFile; fileRef = 53091F982ABE1F570076CBC4 /* WasmIPIntSlowPaths.h */; }; 530A66B91FA3E78B0026A545 /* UnifiedSource3-mm.mm in Sources */ = {isa = PBXBuildFile; fileRef = 530A66B11FA3E77A0026A545 /* UnifiedSource3-mm.mm */; }; 530A66BA1FA3E78B0026A545 /* UnifiedSource4-mm.mm in Sources */ = {isa = PBXBuildFile; fileRef = 530A66B81FA3E77E0026A545 /* UnifiedSource4-mm.mm */; }; 530A66BB1FA3E78B0026A545 /* UnifiedSource5-mm.mm in Sources */ = {isa = PBXBuildFile; fileRef = 530A66B51FA3E77D0026A545 /* UnifiedSource5-mm.mm */; }; @@ -1189,6 +1194,7 @@ 53C4F66B21B1A409002FD009 /* JSAPIGlobalObject.h in Headers */ = {isa = PBXBuildFile; fileRef = 53C4F66A21B1A409002FD009 /* JSAPIGlobalObject.h */; }; 53C6FEEF1E8ADFA900B18425 /* WasmOpcodeOrigin.h in Headers */ = {isa = PBXBuildFile; fileRef = 53C6FEEE1E8ADFA900B18425 /* WasmOpcodeOrigin.h */; }; 53CA730A1EA533D80076049D /* WasmBBQPlan.h in Headers */ = {isa = PBXBuildFile; fileRef = 53CA73081EA533D80076049D /* WasmBBQPlan.h */; }; + 53CBE6532ACF18C0009C083D /* WasmIPIntTierUpCounter.h in Headers */ = {isa = PBXBuildFile; fileRef = 53CBE6522ACF18C0009C083D /* WasmIPIntTierUpCounter.h */; settings = {ATTRIBUTES = (Private, ); }; }; 53D35499240D88BD008950DD /* BytecodeOperandsForCheckpoint.h in Headers */ = {isa = PBXBuildFile; fileRef = 53D35497240D88AD008950DD /* BytecodeOperandsForCheckpoint.h */; }; 53D41EC923C0081A00AE984B /* IterationModeMetadata.h in Headers */ = {isa = PBXBuildFile; fileRef = 53D41EC823C0081000AE984B /* IterationModeMetadata.h */; settings = {ATTRIBUTES = (Private, ); }; }; 53D444DC1DAF08AB00B92784 /* B3WasmAddressValue.h in Headers */ = {isa = PBXBuildFile; fileRef = 53D444DB1DAF08AB00B92784 /* B3WasmAddressValue.h */; }; @@ -1234,6 +1240,9 @@ 62EC9BB71B7EB07C00303AD1 /* CallFrameShuffleData.h in Headers */ = {isa = PBXBuildFile; fileRef = 62EC9BB51B7EB07C00303AD1 /* CallFrameShuffleData.h */; settings = {ATTRIBUTES = (Private, ); }; }; 62F2AA381B0BEDE300610C7A /* DFGLazyNode.h in Headers */ = {isa = PBXBuildFile; fileRef = 62A9A29F1B0BED4800BD54CA /* DFGLazyNode.h */; }; 641DF80E2890C7D500F9895F /* WasmSIMDOpcodes.h in Headers */ = {isa = 
PBXBuildFile; fileRef = 641DF80D2890C7D500F9895F /* WasmSIMDOpcodes.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 642D20D42B476A250030545E /* WasmSlowPaths.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 14AB0C92231747B7000250BC /* WasmSlowPaths.cpp */; }; + 642D20D52B476A2E0030545E /* WasmIPIntSlowPaths.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 53091F972ABE1F570076CBC4 /* WasmIPIntSlowPaths.cpp */; }; + 65072B082B6C3DEA0065065C /* RegisterTZoneTypes.h in Headers */ = {isa = PBXBuildFile; fileRef = 65072B062B6C3DEA0065065C /* RegisterTZoneTypes.h */; settings = {ATTRIBUTES = (Private, ); }; }; 6507D29E0E871E5E00D7D896 /* JSTypeInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 6507D2970E871E4A00D7D896 /* JSTypeInfo.h */; settings = {ATTRIBUTES = (Private, ); }; }; 651122FD14046A4C002B101D /* JavaScriptCore.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 932F5BD90822A1C700736975 /* JavaScriptCore.framework */; }; 651122FE14046A4C002B101D /* libedit.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 5D5D8AD00E0D0EBE00F9C692 /* libedit.dylib */; }; @@ -1247,6 +1256,7 @@ 658D3A5619638268003C45D6 /* VMEntryRecord.h in Headers */ = {isa = PBXBuildFile; fileRef = 658D3A5519638268003C45D6 /* VMEntryRecord.h */; settings = {ATTRIBUTES = (Private, ); }; }; 659CDA5B1F6753F200D3E53F /* YarrUnicodeProperties.h in Headers */ = {isa = PBXBuildFile; fileRef = 659CDA5A1F67509800D3E53F /* YarrUnicodeProperties.h */; settings = {ATTRIBUTES = (Private, ); }; }; 65B8392E1BACAD360044E824 /* CachedRecovery.h in Headers */ = {isa = PBXBuildFile; fileRef = 65B8392C1BACA92A0044E824 /* CachedRecovery.h */; }; + 65F85BD42B7F067600D0AC74 /* TZoneInit.h in Headers */ = {isa = PBXBuildFile; fileRef = 65F85BD22B7F067500D0AC74 /* TZoneInit.h */; settings = {ATTRIBUTES = (Private, ); }; }; 6A38CFAA1E32B5AB0060206F /* AsyncStackTrace.h in Headers */ = {isa = PBXBuildFile; fileRef = 6A38CFA81E32B58B0060206F /* AsyncStackTrace.h */; settings = {ATTRIBUTES = (Private, ); }; }; 6A4C2E162A2E353900D56933 /* FunctionToStringTests.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 6A4C2E152A2E353900D56933 /* FunctionToStringTests.cpp */; }; 6AD2CB4D19B9140100065719 /* DebuggerEvalEnabler.h in Headers */ = {isa = PBXBuildFile; fileRef = 6AD2CB4C19B9140100065719 /* DebuggerEvalEnabler.h */; settings = {ATTRIBUTES = (Private, ); }; }; @@ -1376,9 +1386,6 @@ 86ADD1460FDDEA980006EEC2 /* MacroAssemblerARMv7.h in Headers */ = {isa = PBXBuildFile; fileRef = 86ADD1440FDDEA980006EEC2 /* MacroAssemblerARMv7.h */; settings = {ATTRIBUTES = (Private, ); }; }; 86B8690124B89EA400487C95 /* PrivateFieldPutKind.h in Headers */ = {isa = PBXBuildFile; fileRef = 86B8690024B89EA400487C95 /* PrivateFieldPutKind.h */; settings = {ATTRIBUTES = (Private, ); }; }; 86C36EEA0EE1289D00B3DF59 /* MacroAssembler.h in Headers */ = {isa = PBXBuildFile; fileRef = 86C36EE90EE1289D00B3DF59 /* MacroAssembler.h */; settings = {ATTRIBUTES = (Private, ); }; }; - 86C568E111A213EE0007F7F0 /* MacroAssemblerMIPS.h in Headers */ = {isa = PBXBuildFile; fileRef = 86C568DE11A213EE0007F7F0 /* MacroAssemblerMIPS.h */; settings = {ATTRIBUTES = (Private, ); }; }; - 86C568E211A213EE0007F7F0 /* MIPSAssembler.h in Headers */ = {isa = PBXBuildFile; fileRef = 86C568DF11A213EE0007F7F0 /* MIPSAssembler.h */; settings = {ATTRIBUTES = (Private, ); }; }; - 86C568E211A213EE0007F7FF /* MIPSRegisters.h in Headers */ = {isa = PBXBuildFile; fileRef = 86C568DF11A213EE0007F7FF /* MIPSRegisters.h */; settings = {ATTRIBUTES = (Private, ); }; }; 
86CC85A10EE79A4700288682 /* JITInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = 86CC85A00EE79A4700288682 /* JITInlines.h */; }; 86CCEFDE0F413F8900FD7F9E /* JITCode.h in Headers */ = {isa = PBXBuildFile; fileRef = 86CCEFDD0F413F8900FD7F9E /* JITCode.h */; settings = {ATTRIBUTES = (Private, ); }; }; 86D2221A167EF9440024C804 /* testapi.mm in Sources */ = {isa = PBXBuildFile; fileRef = 86D22219167EF9440024C804 /* testapi.mm */; }; @@ -1423,11 +1430,7 @@ 918E15C12447B22700447A56 /* AggregateErrorPrototype.h in Headers */ = {isa = PBXBuildFile; fileRef = 918E15BD2447B22600447A56 /* AggregateErrorPrototype.h */; }; 918E15C32447B22700447A56 /* AggregateErrorConstructor.h in Headers */ = {isa = PBXBuildFile; fileRef = 918E15BF2447B22700447A56 /* AggregateErrorConstructor.h */; }; 93052C350FB792190048FDC3 /* ParserArena.h in Headers */ = {isa = PBXBuildFile; fileRef = 93052C330FB792190048FDC3 /* ParserArena.h */; settings = {ATTRIBUTES = (Private, ); }; }; - 932F5BD30822A1C700736975 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 6560A4CF04B3B3E7008AE952 /* CoreFoundation.framework */; }; - 932F5BD60822A1C700736975 /* libobjc.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 51F0EC0705C86C9A00E6DF1B /* libobjc.dylib */; }; - 932F5BD70822A1C700736975 /* libicucore.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 9322A00306C341D3009067BB /* libicucore.dylib */; }; 932F5BDD0822A1C700736975 /* jsc.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 45E12D8806A49B0F00E9DF84 /* jsc.cpp */; }; - 932F5BEA0822A1C700736975 /* JavaScriptCore.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 932F5BD90822A1C700736975 /* JavaScriptCore.framework */; }; 933040040E6A749400786E6A /* SmallStrings.h in Headers */ = {isa = PBXBuildFile; fileRef = 93303FEA0E6A72C000786E6A /* SmallStrings.h */; settings = {ATTRIBUTES = (Private, ); }; }; 93BFC6D929B344C90030D7BE /* GlobalObjectMethodTable.h in Headers */ = {isa = PBXBuildFile; fileRef = 93BFC6D829B344C80030D7BE /* GlobalObjectMethodTable.h */; settings = {ATTRIBUTES = (Private, ); }; }; 95CA6AD328809E010062D5EC /* ImplementationVisibility.h in Headers */ = {isa = PBXBuildFile; fileRef = 95CA6AD228809E010062D5EC /* ImplementationVisibility.h */; settings = {ATTRIBUTES = (Private, ); }; }; @@ -1500,7 +1503,6 @@ A503FA1A188E0FB000110F14 /* JavaScriptCallFrame.h in Headers */ = {isa = PBXBuildFile; fileRef = A503FA14188E0FAF00110F14 /* JavaScriptCallFrame.h */; }; A503FA1E188E0FB000110F14 /* JSJavaScriptCallFramePrototype.h in Headers */ = {isa = PBXBuildFile; fileRef = A503FA18188E0FB000110F14 /* JSJavaScriptCallFramePrototype.h */; }; A503FA2A188F105900110F14 /* JSGlobalObjectDebugger.h in Headers */ = {isa = PBXBuildFile; fileRef = A503FA28188F105900110F14 /* JSGlobalObjectDebugger.h */; }; - A5098B041C16AA0200087797 /* Security.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = A5098B031C16AA0200087797 /* Security.framework */; }; A50E4B6218809DD50068A46D /* InspectorRuntimeAgent.h in Headers */ = {isa = PBXBuildFile; fileRef = A50E4B5E18809DD50068A46D /* InspectorRuntimeAgent.h */; settings = {ATTRIBUTES = (Private, ); }; }; A50E4B6418809DD50068A46D /* JSGlobalObjectRuntimeAgent.h in Headers */ = {isa = PBXBuildFile; fileRef = A50E4B6018809DD50068A46D /* JSGlobalObjectRuntimeAgent.h */; }; A51007C1187CC3C600B38879 /* JSGlobalObjectInspectorController.h in Headers */ = {isa = PBXBuildFile; fileRef = A51007BF187CC3C600B38879 /* JSGlobalObjectInspectorController.h */; }; @@ -1600,7 +1602,6 @@ 
A7299DA617D12858005F5FF9 /* SetConstructor.h in Headers */ = {isa = PBXBuildFile; fileRef = A7299DA417D12858005F5FF9 /* SetConstructor.h */; }; A72FFD64139985A800E5365A /* KeywordLookup.h in Headers */ = {isa = PBXBuildFile; fileRef = A7C225CD1399849C00FF1662 /* KeywordLookup.h */; }; A730B6121250068F009D25B1 /* StrictEvalActivation.h in Headers */ = {isa = PBXBuildFile; fileRef = A730B6101250068F009D25B1 /* StrictEvalActivation.h */; }; - A731B25A130093880040A7FA /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 51F0EB6105C86C6B00E6DF1B /* Foundation.framework */; }; A737810E1799EA2E00817533 /* DFGNaturalLoops.h in Headers */ = {isa = PBXBuildFile; fileRef = A737810B1799EA2E00817533 /* DFGNaturalLoops.h */; }; A7386554118697B400540279 /* SpecializedThunkJIT.h in Headers */ = {isa = PBXBuildFile; fileRef = A7386551118697B400540279 /* SpecializedThunkJIT.h */; }; A7386556118697B400540279 /* ThunkGenerators.h in Headers */ = {isa = PBXBuildFile; fileRef = A7386553118697B400540279 /* ThunkGenerators.h */; settings = {ATTRIBUTES = (Private, ); }; }; @@ -1877,7 +1878,6 @@ DD546FEC29CD2A5D00A5173B /* AirTmpWidthInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FEC858F2BDACDC70080FF74 /* AirTmpWidthInlines.h */; }; DD5F74F9283EF58D0027A8C6 /* copy-profiling-data.sh in Headers */ = {isa = PBXBuildFile; fileRef = DD5F74F8283EF4380027A8C6 /* copy-profiling-data.sh */; settings = {ATTRIBUTES = (Private, ); }; }; DDB04F41278E569A008D3678 /* libWTF.a in Product Dependencies */ = {isa = PBXBuildFile; fileRef = 1498CAD3214656C400710879 /* libWTF.a */; }; - DDB04F42278E56A2008D3678 /* libWTF.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 1498CAD3214656C400710879 /* libWTF.a */; }; DDE99312278D089000F60D26 /* libWebKitAdditions.a in Product Dependencies */ = {isa = PBXBuildFile; fileRef = DDE9930E278D086600F60D26 /* libWebKitAdditions.a */; }; DE26E9031CB5DD0500D2BE82 /* BuiltinExecutableCreator.h in Headers */ = {isa = PBXBuildFile; fileRef = DE26E9021CB5DD0500D2BE82 /* BuiltinExecutableCreator.h */; }; DEA7E2451BBC677F00D78440 /* JSTypedArrayViewPrototype.h in Headers */ = {isa = PBXBuildFile; fileRef = 53917E7C1B791106000EBD33 /* JSTypedArrayViewPrototype.h */; settings = {ATTRIBUTES = (Private, ); }; }; @@ -1978,6 +1978,7 @@ E34EDBF71DB5FFC900DC87A5 /* FrameTracers.h in Headers */ = {isa = PBXBuildFile; fileRef = E34EDBF61DB5FFC100DC87A5 /* FrameTracers.h */; settings = {ATTRIBUTES = (Private, ); }; }; E34F930E2322D882002B8DB4 /* JSGenerator.h in Headers */ = {isa = PBXBuildFile; fileRef = E34F930C2322D881002B8DB4 /* JSGenerator.h */; settings = {ATTRIBUTES = (Private, ); }; }; E350708A1DC49BBF0089BCD6 /* DOMJITSignature.h in Headers */ = {isa = PBXBuildFile; fileRef = E35070891DC49BB60089BCD6 /* DOMJITSignature.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E352576A2AF97DE400BD4754 /* CallLinkInfoBase.h in Headers */ = {isa = PBXBuildFile; fileRef = E35257682AF97DE300BD4754 /* CallLinkInfoBase.h */; settings = {ATTRIBUTES = (Private, ); }; }; E353C11D24AA4CB7003FBDF3 /* IntlDisplayNames.h in Headers */ = {isa = PBXBuildFile; fileRef = E353C11724AA4CB6003FBDF3 /* IntlDisplayNames.h */; }; E353C11E24AA4CB7003FBDF3 /* IntlDisplayNamesConstructor.h in Headers */ = {isa = PBXBuildFile; fileRef = E353C11824AA4CB6003FBDF3 /* IntlDisplayNamesConstructor.h */; }; E353C12124AA4CB7003FBDF3 /* IntlDisplayNamesPrototype.h in Headers */ = {isa = PBXBuildFile; fileRef = E353C11B24AA4CB7003FBDF3 /* IntlDisplayNamesPrototype.h */; }; @@ -2060,6 +2061,7 @@ 
E3CDCFAF28DD065A00215350 /* ImportMap.h in Headers */ = {isa = PBXBuildFile; fileRef = E3CDCFAE28DD065A00215350 /* ImportMap.h */; settings = {ATTRIBUTES = (Private, ); }; }; E3D239C91B829C1C00BBEF67 /* JSModuleEnvironment.h in Headers */ = {isa = PBXBuildFile; fileRef = E3D239C71B829C1C00BBEF67 /* JSModuleEnvironment.h */; settings = {ATTRIBUTES = (Private, ); }; }; E3D3515F241B89D7008DC16E /* MarkedJSValueRefArray.h in Headers */ = {isa = PBXBuildFile; fileRef = E3D3515D241B89CE008DC16E /* MarkedJSValueRefArray.h */; }; + E3D4FFE22AF21D96004ED359 /* InlineAttribute.h in Headers */ = {isa = PBXBuildFile; fileRef = E3D4FFE12AF21D96004ED359 /* InlineAttribute.h */; settings = {ATTRIBUTES = (Private, ); }; }; E3D7086E29FA66820061F230 /* ScriptFunctionCall.h in Headers */ = {isa = PBXBuildFile; fileRef = E3D7086C29FA667E0061F230 /* ScriptFunctionCall.h */; settings = {ATTRIBUTES = (Private, ); }; }; E3D877741E65C0A000BE945A /* BytecodeDumper.h in Headers */ = {isa = PBXBuildFile; fileRef = E3D877721E65C08900BE945A /* BytecodeDumper.h */; }; E3E6E9CC28F3C33F00EDE7C0 /* ChainedWatchpoint.h in Headers */ = {isa = PBXBuildFile; fileRef = E3E6E9CB28F3C33F00EDE7C0 /* ChainedWatchpoint.h */; settings = {ATTRIBUTES = (Private, ); }; }; @@ -2111,6 +2113,7 @@ FE287D02252FB2E800D723F9 /* VerifierSlotVisitor.h in Headers */ = {isa = PBXBuildFile; fileRef = FE287D01252FB2E800D723F9 /* VerifierSlotVisitor.h */; settings = {ATTRIBUTES = (Private, ); }; }; FE2A87601F02381600EB31B2 /* MinimumReservedZoneSize.h in Headers */ = {isa = PBXBuildFile; fileRef = FE2A875F1F02381600EB31B2 /* MinimumReservedZoneSize.h */; }; FE2CC9302756B2B9003F5AB8 /* HeapSubspaceTypes.h in Headers */ = {isa = PBXBuildFile; fileRef = FE2CC92F2756B2B9003F5AB8 /* HeapSubspaceTypes.h */; settings = {ATTRIBUTES = (Private, ); }; }; + FE2D0B382AE242B000A071A7 /* SideDataRepository.h in Headers */ = {isa = PBXBuildFile; fileRef = FE2D0B362AE242AF00A071A7 /* SideDataRepository.h */; settings = {ATTRIBUTES = (Private, ); }; }; FE3022D71E42857300BAC493 /* VMInspector.h in Headers */ = {isa = PBXBuildFile; fileRef = FE3022D51E42856700BAC493 /* VMInspector.h */; }; FE336B5325DB497D0098F034 /* MarkingConstraintExecutorPair.h in Headers */ = {isa = PBXBuildFile; fileRef = FE336B5225DB497D0098F034 /* MarkingConstraintExecutorPair.h */; }; FE3422121D6B81C30032BE88 /* ThrowScope.h in Headers */ = {isa = PBXBuildFile; fileRef = FE3422111D6B818C0032BE88 /* ThrowScope.h */; settings = {ATTRIBUTES = (Private, ); }; }; @@ -2162,10 +2165,12 @@ FEA08621182B7A0400F6D851 /* DebuggerPrimitives.h in Headers */ = {isa = PBXBuildFile; fileRef = FEA0861F182B7A0400F6D851 /* DebuggerPrimitives.h */; settings = {ATTRIBUTES = (Private, ); }; }; FEA3BBA8212B655900E93AD1 /* CallFrameInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = FEA3BBA7212B655800E93AD1 /* CallFrameInlines.h */; settings = {ATTRIBUTES = (Private, ); }; }; FEA3BBAC212C97CB00E93AD1 /* DFGCFG.h in Headers */ = {isa = PBXBuildFile; fileRef = FEA3BBAB212C97CB00E93AD1 /* DFGCFG.h */; }; + FEB21EA92AF7E596002C482C /* ExpressionInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = FEB21EA82AF7E595002C482C /* ExpressionInfo.h */; settings = {ATTRIBUTES = (Private, ); }; }; FEB51F6C1A97B688001F921C /* Regress141809.mm in Sources */ = {isa = PBXBuildFile; fileRef = FEB51F6B1A97B688001F921C /* Regress141809.mm */; }; FEB58C15187B8B160098EF0B /* ErrorHandlingScope.h in Headers */ = {isa = PBXBuildFile; fileRef = FEB58C13187B8B160098EF0B /* ErrorHandlingScope.h */; settings = {ATTRIBUTES = (Private, ); }; 
}; FEC160322339E9F900A04CB8 /* CellSize.h in Headers */ = {isa = PBXBuildFile; fileRef = FEC160312339E9F900A04CB8 /* CellSize.h */; }; FEC3A3A1248735CA00395B54 /* DFGDoesGCCheck.h in Headers */ = {isa = PBXBuildFile; fileRef = FEC3A3A0248735BC00395B54 /* DFGDoesGCCheck.h */; settings = {ATTRIBUTES = (Private, ); }; }; + FEC503FE2B51E09700176A93 /* LineColumn.h in Headers */ = {isa = PBXBuildFile; fileRef = FEC503FD2B51E09700176A93 /* LineColumn.h */; settings = {ATTRIBUTES = (Private, ); }; }; FEC5797323105B5100BCA83F /* VMInspectorInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = FEC5797223105B4800BCA83F /* VMInspectorInlines.h */; }; FEC5797623105F4E00BCA83F /* Integrity.h in Headers */ = {isa = PBXBuildFile; fileRef = FEC5797523105F4300BCA83F /* Integrity.h */; settings = {ATTRIBUTES = (Private, ); }; }; FEC579782310954C00BCA83F /* IntegrityInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = FEC579772310954B00BCA83F /* IntegrityInlines.h */; settings = {ATTRIBUTES = (Private, ); }; }; @@ -2182,6 +2187,7 @@ FEF5B42C2628CBC80016E776 /* VMTrapsInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = FEF5B42B2628CBC80016E776 /* VMTrapsInlines.h */; settings = {ATTRIBUTES = (Private, ); }; }; FEF5B430262A338B0016E776 /* ExceptionExpectation.h in Headers */ = {isa = PBXBuildFile; fileRef = FEF5B42F262A338B0016E776 /* ExceptionExpectation.h */; settings = {ATTRIBUTES = (Private, ); }; }; FEF90A8D28AC135F00C14B84 /* APIIntegrityPrivate.h in Headers */ = {isa = PBXBuildFile; fileRef = FEF90A8C28AC135C00C14B84 /* APIIntegrityPrivate.h */; settings = {ATTRIBUTES = (Private, ); }; }; + FEF934472B4DC61500DFA7F5 /* ExpressionInfoInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = FEF934462B4DC61500DFA7F5 /* ExpressionInfoInlines.h */; settings = {ATTRIBUTES = (Private, ); }; }; FEFD6FC61D5E7992008F2F0B /* JSStringInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = FEFD6FC51D5E7970008F2F0B /* JSStringInlines.h */; settings = {ATTRIBUTES = (Private, ); }; }; FF41590C28FF3C6B00F80B96 /* WaiterListManager.h in Headers */ = {isa = PBXBuildFile; fileRef = FF41590B28FF3C6B00F80B96 /* WaiterListManager.h */; settings = {ATTRIBUTES = (Private, ); }; }; /* End PBXBuildFile section */ @@ -2380,6 +2386,13 @@ remoteGlobalIDString = 65FB3F6609D11E9100F49DEB; remoteInfo = "Derived Sources"; }; + 44F93E102AE7200100FFA37C /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 44F93E012AE71F5300FFA37C; + remoteInfo = libJavaScriptCore; + }; 52CD0F652242F5A3004A18A5 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; @@ -2481,6 +2494,17 @@ /* End PBXContainerItemProxy section */ /* Begin PBXCopyFilesBuildPhase section */ + 446997032AFED699008B930C /* Product Dependencies */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = ""; + dstSubfolderSpec = 16; + files = ( + 4469970B2AFEF6CF008B930C /* JavaScriptCore.framework in Product Dependencies */, + ); + name = "Product Dependencies"; + runOnlyForDeploymentPostprocessing = 0; + }; 5DBB1524131D0BA10056AD36 /* Copy Support Script */ = { isa = PBXCopyFilesBuildPhase; buildActionMask = 2147483647; @@ -2542,7 +2566,6 @@ 0F0B839814BCF45A00885B4F /* LLIntThunks.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LLIntThunks.h; path = llint/LLIntThunks.h; sourceTree = ""; }; 0F0B83A514BCF50400885B4F 
/* CodeType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CodeType.h; sourceTree = ""; }; 0F0B83A814BCF55E00885B4F /* HandlerInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = HandlerInfo.h; sourceTree = ""; }; - 0F0B83AA14BCF5B900885B4F /* ExpressionRangeInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ExpressionRangeInfo.h; sourceTree = ""; }; 0F0B83AE14BCF71400885B4F /* CallLinkInfo.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CallLinkInfo.cpp; sourceTree = ""; }; 0F0B83AF14BCF71400885B4F /* CallLinkInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CallLinkInfo.h; sourceTree = ""; }; 0F0C03A92995FB710064230A /* HasOwnPropertyCache.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = HasOwnPropertyCache.cpp; sourceTree = ""; }; @@ -3932,8 +3955,18 @@ 43AB26C51C1A52F700D82AE6 /* B3MathExtras.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = B3MathExtras.h; path = b3/B3MathExtras.h; sourceTree = ""; }; 43C392AA1C3BEB0000241F53 /* AssemblerCommon.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AssemblerCommon.h; sourceTree = ""; }; 442FBD852149D1E00073519C /* hasher.py */ = {isa = PBXFileReference; lastKnownFileType = text.script.python; name = hasher.py; path = yarr/hasher.py; sourceTree = ""; }; + 443CB47A2AE8A4E100878CC0 /* BaseTarget.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = BaseTarget.xcconfig; sourceTree = ""; }; + 4487DB812AF825C700AFECAE /* Fuzzilli.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Fuzzilli.cpp; sourceTree = ""; }; + 4487DB822AF825C800AFECAE /* Fuzzilli.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Fuzzilli.h; sourceTree = ""; }; + 44F93DFD2AE71EBD00FFA37C /* libJavaScriptCore.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = libJavaScriptCore.xcconfig; sourceTree = ""; }; + 44F93E022AE71F5400FFA37C /* libJavaScriptCore.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libJavaScriptCore.a; sourceTree = BUILT_PRODUCTS_DIR; }; + 44F93E0D2AE71F9F00FFA37C /* JavaScriptCoreFramework.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = JavaScriptCoreFramework.cpp; sourceTree = ""; }; 451539B812DC994500EF7AC4 /* Yarr.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Yarr.h; path = yarr/Yarr.h; sourceTree = ""; }; 45E12D8806A49B0F00E9DF84 /* jsc.cpp */ = {isa = PBXFileReference; fileEncoding = 30; indentWidth = 4; lastKnownFileType = sourcecode.cpp.cpp; path = jsc.cpp; sourceTree = ""; tabWidth = 4; }; + 4615E4662B5833FB001D4D53 /* WasmBBQJIT64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WasmBBQJIT64.h; sourceTree = ""; }; + 4615E4672B5833FB001D4D53 /* WasmBBQJIT32_64.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WasmBBQJIT32_64.cpp; sourceTree = ""; }; + 4615E4682B5833FB001D4D53 /* WasmBBQJIT64.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WasmBBQJIT64.cpp; sourceTree = ""; }; + 
4615E4692B5833FC001D4D53 /* WasmBBQJIT32_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WasmBBQJIT32_64.h; sourceTree = ""; }; 4B78E098294427D2003C6682 /* B3SIMDValue.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = B3SIMDValue.cpp; path = b3/B3SIMDValue.cpp; sourceTree = ""; }; 4B78E099294427D2003C6682 /* B3Const128Value.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = B3Const128Value.h; path = b3/B3Const128Value.h; sourceTree = ""; }; 4B78E09A294427D2003C6682 /* B3SIMDValue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = B3SIMDValue.h; path = b3/B3SIMDValue.h; sourceTree = ""; }; @@ -3997,6 +4030,8 @@ 52F6C35C1E71EB080081F4CC /* WebAssemblyWrapperFunction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = WebAssemblyWrapperFunction.h; path = js/WebAssemblyWrapperFunction.h; sourceTree = ""; }; 52FDABC22788076900C15B59 /* WasmIRGeneratorHelpers.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = WasmIRGeneratorHelpers.h; sourceTree = ""; }; 5300740C22DD6F6600B9ACB3 /* JSFinalizationRegistry.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = JSFinalizationRegistry.cpp; sourceTree = ""; }; + 53091F972ABE1F570076CBC4 /* WasmIPIntSlowPaths.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WasmIPIntSlowPaths.cpp; sourceTree = ""; }; + 53091F982ABE1F570076CBC4 /* WasmIPIntSlowPaths.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WasmIPIntSlowPaths.h; sourceTree = ""; }; 530A63401FA3E31C0026A545 /* SourcesCocoa.txt */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = SourcesCocoa.txt; sourceTree = ""; }; 530A63411FA3E31D0026A545 /* Sources.txt */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = Sources.txt; sourceTree = ""; }; 530A66AD1FA3E7770026A545 /* UnifiedSource144.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = UnifiedSource144.cpp; path = "DerivedSources/JavaScriptCore/unified-sources/UnifiedSource144.cpp"; sourceTree = BUILT_PRODUCTS_DIR; }; @@ -4247,6 +4282,7 @@ 53C6FEF01E8AFE0C00B18425 /* WasmOpcodeOrigin.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WasmOpcodeOrigin.cpp; sourceTree = ""; }; 53CA73071EA533D80076049D /* WasmBBQPlan.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WasmBBQPlan.cpp; sourceTree = ""; }; 53CA73081EA533D80076049D /* WasmBBQPlan.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WasmBBQPlan.h; sourceTree = ""; }; + 53CBE6522ACF18C0009C083D /* WasmIPIntTierUpCounter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WasmIPIntTierUpCounter.h; sourceTree = ""; }; 53D35497240D88AD008950DD /* BytecodeOperandsForCheckpoint.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BytecodeOperandsForCheckpoint.h; sourceTree = ""; }; 53D35498240D88AD008950DD /* BytecodeUseDef.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BytecodeUseDef.cpp; sourceTree = ""; }; 53D41EC823C0081000AE984B /* IterationModeMetadata.h */ = {isa 
= PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = IterationModeMetadata.h; sourceTree = ""; }; @@ -4321,6 +4357,8 @@ 62EC9BB41B7EB07C00303AD1 /* CallFrameShuffleData.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CallFrameShuffleData.cpp; sourceTree = ""; }; 62EC9BB51B7EB07C00303AD1 /* CallFrameShuffleData.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CallFrameShuffleData.h; sourceTree = ""; }; 641DF80D2890C7D500F9895F /* WasmSIMDOpcodes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WasmSIMDOpcodes.h; sourceTree = ""; }; + 65072B052B6C3DEA0065065C /* RegisterTZoneTypes.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = RegisterTZoneTypes.cpp; sourceTree = ""; }; + 65072B062B6C3DEA0065065C /* RegisterTZoneTypes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RegisterTZoneTypes.h; sourceTree = ""; }; 6507D2970E871E4A00D7D896 /* JSTypeInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSTypeInfo.h; sourceTree = ""; }; 651122E5140469BA002B101D /* testRegExp.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = testRegExp.cpp; sourceTree = ""; }; 6511230514046A4C002B101D /* testRegExp */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = testRegExp; sourceTree = BUILT_PRODUCTS_DIR; }; @@ -4347,6 +4385,14 @@ 6560A4CF04B3B3E7008AE952 /* CoreFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreFoundation.framework; path = /System/Library/Frameworks/CoreFoundation.framework; sourceTree = ""; }; 65621E6B089E859700760F35 /* PropertySlot.cpp */ = {isa = PBXFileReference; fileEncoding = 30; indentWidth = 4; lastKnownFileType = sourcecode.cpp.cpp; path = PropertySlot.cpp; sourceTree = ""; tabWidth = 8; }; 65621E6C089E859700760F35 /* PropertySlot.h */ = {isa = PBXFileReference; fileEncoding = 30; indentWidth = 4; lastKnownFileType = sourcecode.c.h; path = PropertySlot.h; sourceTree = ""; tabWidth = 8; }; + 6571E94C2AF2E769009DF224 /* RuntimeTZoneImpls.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = RuntimeTZoneImpls.cpp; sourceTree = ""; }; + 6571E94E2AF315A7009DF224 /* B3TZoneImpls.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = B3TZoneImpls.cpp; path = b3/B3TZoneImpls.cpp; sourceTree = ""; }; + 6571E9502AF31C34009DF224 /* AirTZoneImpls.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = AirTZoneImpls.cpp; path = b3/air/AirTZoneImpls.cpp; sourceTree = ""; }; + 6571E9542AF320AE009DF224 /* DFGTZoneImpls.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGTZoneImpls.cpp; path = dfg/DFGTZoneImpls.cpp; sourceTree = ""; }; + 6571E9562AF3495A009DF224 /* WasmTZoneImpls.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WasmTZoneImpls.cpp; sourceTree = ""; }; + 6571E9582AF35AA2009DF224 /* YarrTZoneImpls.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = YarrTZoneImpls.cpp; path = yarr/YarrTZoneImpls.cpp; sourceTree = ""; }; + 6571E95A2AF9B98D009DF224 /* 
ProfilerTZoneImpls.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = ProfilerTZoneImpls.cpp; path = profiler/ProfilerTZoneImpls.cpp; sourceTree = ""; }; + 6571E9812B23F4D0009DF224 /* JITTZoneImpls.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JITTZoneImpls.cpp; sourceTree = ""; }; 657CF45619BF6662004ACBF2 /* JSCallee.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JSCallee.cpp; sourceTree = ""; }; 657CF45719BF6662004ACBF2 /* JSCallee.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSCallee.h; sourceTree = ""; }; 65860177185A8F5E00030EEE /* MaxFrameExtentForSlowPathCall.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MaxFrameExtentForSlowPathCall.h; sourceTree = ""; }; @@ -4373,6 +4419,8 @@ 65EA4C9A092AF9E20093D800 /* JSLock.h */ = {isa = PBXFileReference; fileEncoding = 30; indentWidth = 4; lastKnownFileType = sourcecode.c.h; path = JSLock.h; sourceTree = ""; tabWidth = 8; }; 65EA73620BAE35D1001BB560 /* CommonIdentifiers.cpp */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.cpp.cpp; path = CommonIdentifiers.cpp; sourceTree = ""; }; 65EA73630BAE35D1001BB560 /* CommonIdentifiers.h */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.c.h; path = CommonIdentifiers.h; sourceTree = ""; }; + 65F85BD22B7F067500D0AC74 /* TZoneInit.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TZoneInit.h; sourceTree = ""; }; + 65F85BD32B7F067500D0AC74 /* TZoneInit.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = TZoneInit.cpp; sourceTree = ""; }; 65FB5115184EE8F800C12B70 /* ProtoCallFrame.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ProtoCallFrame.h; sourceTree = ""; }; 6A38CFA71E32B58B0060206F /* AsyncStackTrace.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AsyncStackTrace.cpp; sourceTree = ""; }; 6A38CFA81E32B58B0060206F /* AsyncStackTrace.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AsyncStackTrace.h; sourceTree = ""; }; @@ -4614,9 +4662,6 @@ 86B8690224B8A20800487C95 /* PrivateFieldPutKind.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = PrivateFieldPutKind.cpp; sourceTree = ""; }; 86BF642A148DB2B5004DE36A /* Intrinsic.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Intrinsic.h; sourceTree = ""; }; 86C36EE90EE1289D00B3DF59 /* MacroAssembler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MacroAssembler.h; sourceTree = ""; }; - 86C568DE11A213EE0007F7F0 /* MacroAssemblerMIPS.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MacroAssemblerMIPS.h; sourceTree = ""; }; - 86C568DF11A213EE0007F7F0 /* MIPSAssembler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MIPSAssembler.h; sourceTree = ""; }; - 86C568DF11A213EE0007F7FF /* MIPSRegisters.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MIPSRegisters.h; sourceTree = ""; }; 86CC85A00EE79A4700288682 /* JITInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = JITInlines.h; sourceTree = ""; }; 86CC85A20EE79B7400288682 /* JITCall.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JITCall.cpp; sourceTree = ""; }; 86CC85C30EE7A89400288682 /* JITPropertyAccess.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JITPropertyAccess.cpp; sourceTree = ""; }; @@ -5593,6 +5638,8 @@ E34F930B2322D881002B8DB4 /* JSGenerator.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JSGenerator.cpp; sourceTree = ""; }; E34F930C2322D881002B8DB4 /* JSGenerator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSGenerator.h; sourceTree = ""; }; E35070891DC49BB60089BCD6 /* DOMJITSignature.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DOMJITSignature.h; sourceTree = ""; }; + E35257672AF97DE300BD4754 /* CallLinkInfoBase.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CallLinkInfoBase.cpp; sourceTree = ""; }; + E35257682AF97DE300BD4754 /* CallLinkInfoBase.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CallLinkInfoBase.h; sourceTree = ""; }; E353C11624AA4CB5003FBDF3 /* IntlDisplayNames.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = IntlDisplayNames.cpp; sourceTree = ""; }; E353C11724AA4CB6003FBDF3 /* IntlDisplayNames.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = IntlDisplayNames.h; sourceTree = ""; }; E353C11824AA4CB6003FBDF3 /* IntlDisplayNamesConstructor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = IntlDisplayNamesConstructor.h; sourceTree = ""; }; @@ -5730,6 +5777,7 @@ E3D2642A1D38C042000BE174 /* BytecodeRewriter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BytecodeRewriter.h; sourceTree = ""; }; E3D3515D241B89CE008DC16E /* MarkedJSValueRefArray.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = MarkedJSValueRefArray.h; sourceTree = ""; }; E3D3515E241B89CF008DC16E /* MarkedJSValueRefArray.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = MarkedJSValueRefArray.cpp; sourceTree = ""; }; + E3D4FFE12AF21D96004ED359 /* InlineAttribute.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InlineAttribute.h; sourceTree = ""; }; E3D7086C29FA667E0061F230 /* ScriptFunctionCall.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ScriptFunctionCall.h; sourceTree = ""; }; E3D7086D29FA667E0061F230 /* ScriptFunctionCall.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = ScriptFunctionCall.cpp; sourceTree = ""; }; E3D877711E65C08900BE945A /* BytecodeDumper.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BytecodeDumper.cpp; sourceTree = ""; }; @@ -5857,6 +5905,8 @@ FE2A875F1F02381600EB31B2 /* MinimumReservedZoneSize.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = MinimumReservedZoneSize.h; sourceTree = ""; }; FE2BD66E25C0DC8200999D3B /* VerifierSlotVisitor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = VerifierSlotVisitor.cpp; sourceTree = ""; }; FE2CC92F2756B2B9003F5AB8 /* 
HeapSubspaceTypes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = HeapSubspaceTypes.h; sourceTree = ""; }; + FE2D0B352AE242AF00A071A7 /* SideDataRepository.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = SideDataRepository.cpp; sourceTree = ""; }; + FE2D0B362AE242AF00A071A7 /* SideDataRepository.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SideDataRepository.h; sourceTree = ""; }; FE2E6A7A1D6EA5FE0060F896 /* ThrowScope.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ThrowScope.cpp; sourceTree = ""; }; FE3022D41E42856700BAC493 /* VMInspector.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = VMInspector.cpp; sourceTree = ""; }; FE3022D51E42856700BAC493 /* VMInspector.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = VMInspector.h; sourceTree = ""; }; @@ -5949,6 +5999,8 @@ FEA3BBA7212B655800E93AD1 /* CallFrameInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CallFrameInlines.h; sourceTree = ""; }; FEA3BBAB212C97CB00E93AD1 /* DFGCFG.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGCFG.h; path = dfg/DFGCFG.h; sourceTree = ""; }; FEB137561BB11EEE00CD5100 /* MacroAssemblerARM64.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MacroAssemblerARM64.cpp; sourceTree = ""; }; + FEB21EA62AF7E547002C482C /* ExpressionInfo.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ExpressionInfo.cpp; sourceTree = ""; }; + FEB21EA82AF7E595002C482C /* ExpressionInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ExpressionInfo.h; sourceTree = ""; }; FEB41CCB1F73284200C5481E /* ProbeFrame.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ProbeFrame.h; sourceTree = ""; }; FEB51F6A1A97B688001F921C /* Regress141809.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Regress141809.h; path = API/tests/Regress141809.h; sourceTree = ""; }; FEB51F6B1A97B688001F921C /* Regress141809.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = Regress141809.mm; path = API/tests/Regress141809.mm; sourceTree = ""; }; @@ -5957,10 +6009,15 @@ FEC160312339E9F900A04CB8 /* CellSize.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CellSize.h; sourceTree = ""; }; FEC3A39F248735BC00395B54 /* DFGDoesGCCheck.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = DFGDoesGCCheck.cpp; path = dfg/DFGDoesGCCheck.cpp; sourceTree = ""; }; FEC3A3A0248735BC00395B54 /* DFGDoesGCCheck.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = DFGDoesGCCheck.h; path = dfg/DFGDoesGCCheck.h; sourceTree = ""; }; + FEC503FD2B51E09700176A93 /* LineColumn.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LineColumn.h; sourceTree = ""; }; FEC5797223105B4800BCA83F /* VMInspectorInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = VMInspectorInlines.h; sourceTree = ""; }; FEC5797423105F4200BCA83F /* Integrity.cpp */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Integrity.cpp; sourceTree = ""; }; FEC5797523105F4300BCA83F /* Integrity.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Integrity.h; sourceTree = ""; }; FEC579772310954B00BCA83F /* IntegrityInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = IntegrityInlines.h; sourceTree = ""; }; + FEC60FE92AE971FD003B7B31 /* MacroAssemblerRISCV64.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = MacroAssemblerRISCV64.cpp; sourceTree = ""; }; + FEC60FEC2AE9722C003B7B31 /* MacroAssemblerRISCV64.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = MacroAssemblerRISCV64.h; sourceTree = ""; }; + FEC60FEE2AE9726E003B7B31 /* RISCV64Registers.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = RISCV64Registers.h; sourceTree = ""; }; + FEC60FEF2AE9726E003B7B31 /* RISCV64Assembler.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = RISCV64Assembler.h; sourceTree = ""; }; FECB8B251D25BB6E006F2463 /* FunctionOverridesTest.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = FunctionOverridesTest.cpp; path = API/tests/FunctionOverridesTest.cpp; sourceTree = ""; }; FECB8B261D25BB6E006F2463 /* FunctionOverridesTest.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = FunctionOverridesTest.h; path = API/tests/FunctionOverridesTest.h; sourceTree = ""; }; FECB8B291D25CABB006F2463 /* testapi-function-overrides.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; name = "testapi-function-overrides.js"; path = "API/tests/testapi-function-overrides.js"; sourceTree = ""; }; @@ -5985,7 +6042,6 @@ FEF3475920362B1D00B7C0EF /* asm.rb */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.ruby; path = asm.rb; sourceTree = ""; }; FEF3475A20362B1E00B7C0EF /* cloop.rb */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.ruby; path = cloop.rb; sourceTree = ""; }; FEF3475B20362B1E00B7C0EF /* x86.rb */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.ruby; path = x86.rb; sourceTree = ""; }; - FEF3475C20362B1E00B7C0EF /* mips.rb */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.ruby; path = mips.rb; sourceTree = ""; }; FEF3475D20362B1F00B7C0EF /* config.rb */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.ruby; path = config.rb; sourceTree = ""; }; FEF3475E20362B1F00B7C0EF /* instructions.rb */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.ruby; path = instructions.rb; sourceTree = ""; }; FEF3475F20362B2000B7C0EF /* opt.rb */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.ruby; path = opt.rb; sourceTree = ""; }; @@ -6004,6 +6060,7 @@ FEF5B42F262A338B0016E776 /* ExceptionExpectation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ExceptionExpectation.h; sourceTree = ""; }; FEF90A8C28AC135C00C14B84 /* APIIntegrityPrivate.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = APIIntegrityPrivate.h; sourceTree = ""; }; FEF90A8E28AC187A00C14B84 /* APIIntegrity.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = APIIntegrity.cpp; sourceTree 
= ""; }; + FEF934462B4DC61500DFA7F5 /* ExpressionInfoInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ExpressionInfoInlines.h; sourceTree = ""; }; FEFD6FC51D5E7970008F2F0B /* JSStringInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSStringInlines.h; sourceTree = ""; }; FF41590B28FF3C6B00F80B96 /* WaiterListManager.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WaiterListManager.h; sourceTree = ""; }; FFB77C2828FF561B00F3C55B /* WaiterListManager.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WaiterListManager.cpp; sourceTree = ""; }; @@ -6071,6 +6128,13 @@ ); runOnlyForDeploymentPostprocessing = 0; }; + 44F93DFF2AE71F5300FFA37C /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; 52CD0F5C2242F569004A18A5 /* Frameworks */ = { isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; @@ -6102,12 +6166,6 @@ isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; files = ( - 932F5BD30822A1C700736975 /* CoreFoundation.framework in Frameworks */, - A731B25A130093880040A7FA /* Foundation.framework in Frameworks */, - 932F5BD70822A1C700736975 /* libicucore.dylib in Frameworks */, - 932F5BD60822A1C700736975 /* libobjc.dylib in Frameworks */, - DDB04F42278E56A2008D3678 /* libWTF.a in Frameworks */, - A5098B041C16AA0200087797 /* Security.framework in Frameworks */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -6115,7 +6173,6 @@ isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; files = ( - 932F5BEA0822A1C700736975 /* JavaScriptCore.framework in Frameworks */, 5D5D8AD10E0D0EBE00F9C692 /* libedit.dylib in Frameworks */, ); runOnlyForDeploymentPostprocessing = 0; @@ -6148,6 +6205,7 @@ 79281BDC20B62B3E002E2A60 /* testmem */, 6511230514046A4C002B101D /* testRegExp */, 932F5BD90822A1C700736975 /* JavaScriptCore.framework */, + 44F93E022AE71F5400FFA37C /* libJavaScriptCore.a */, ); name = Products; sourceTree = ""; @@ -6161,9 +6219,10 @@ 530A63401FA3E31C0026A545 /* SourcesCocoa.txt */, F68EBB8C0255D4C601FF60F7 /* config.h */, F692A8540255597D01FF60F7 /* create_hash_table */, - 532631B3218777A5007B8191 /* JavaScriptCore.modulemap */, 937B63CC09E766D200A671DD /* DerivedSources.make */, 0F93275A1C20BCDF00CF6564 /* dynbench.cpp */, + 532631B3218777A5007B8191 /* JavaScriptCore.modulemap */, + 44F93E0D2AE71F9F00FFA37C /* JavaScriptCoreFramework.cpp */, F5C290E60284F98E018635CA /* JavaScriptCorePrefix.h */, 45E12D8806A49B0F00E9DF84 /* jsc.cpp */, A7C225CC139981F100FF1662 /* KeywordLookupGenerator.py */, @@ -6182,6 +6241,7 @@ E3FF752D1D9CE9EA00C7E16D /* domjit */, 0867D69AFE84028FC02AAC07 /* Frameworks */, 0FEA09FC1705137F00BB722C /* ftl */, + 4487DB802AF8257200AFECAE /* fuzzilli */, FE35C2EF21B1E650000F4CA8 /* generator */, 142E312A134FF0A600AFADB5 /* heap */, A5BA15DF1823409200A82E69 /* inspector */, @@ -6507,6 +6567,7 @@ 0FEC84F11BDACDAC0080FF74 /* B3Type.cpp */, 0FEC84F21BDACDAC0080FF74 /* B3Type.h */, DCFDFBD81D1F5D9800FE3D72 /* B3TypeMap.h */, + 6571E94E2AF315A7009DF224 /* B3TZoneImpls.cpp */, 0FEC84F31BDACDAC0080FF74 /* B3UpsilonValue.cpp */, 0FEC84F41BDACDAC0080FF74 /* B3UpsilonValue.h */, 0FEC84F51BDACDAC0080FF74 /* B3UseCounts.cpp */, @@ -6651,6 +6712,7 @@ 0FE0E4AB1C24C94A002E17B6 /* AirTmpWidth.cpp */, 0FE0E4AC1C24C94A002E17B6 /* AirTmpWidth.h */, 0FEC858F2BDACDC70080FF74 /* 
AirTmpWidthInlines.h */, + 6571E9502AF31C34009DF224 /* AirTZoneImpls.cpp */, 0F3730921C0D67EE00052BFA /* AirUseCounts.h */, 0FEC856B1BDACDC70080FF74 /* AirValidate.cpp */, 0FEC856C1BDACDC70080FF74 /* AirValidate.h */, @@ -6860,6 +6922,7 @@ 0F5EF91C16878F78003E5C25 /* JITThunks.h */, 0FC712E017CD878F008CC93C /* JITToDFGDeferredCompilationCallback.cpp */, 0FC712E117CD878F008CC93C /* JITToDFGDeferredCompilationCallback.h */, + 6571E9812B23F4D0009DF224 /* JITTZoneImpls.cpp */, DC0184171D10C1870057B053 /* JITWorklist.cpp */, DC0184181D10C1870057B053 /* JITWorklist.h */, 72131BFB26587EFA007114CF /* JITWorklistInlines.h */, @@ -7269,9 +7332,11 @@ isa = PBXGroup; children = ( 1C9051450BA9E8A70081E9D0 /* Base.xcconfig */, + 443CB47A2AE8A4E100878CC0 /* BaseTarget.xcconfig */, 1C9051440BA9E8A70081E9D0 /* DebugRelease.xcconfig */, 1C9051430BA9E8A70081E9D0 /* JavaScriptCore.xcconfig */, 5DAFD6CB146B686300FBEFB4 /* JSC.xcconfig */, + 44F93DFD2AE71EBD00FFA37C /* libJavaScriptCore.xcconfig */, FEE0A12229FE250400CED5E4 /* TestExecutable.xcconfig */, BC021BF2136900C300FC5467 /* ToolExecutable.xcconfig */, ); @@ -7280,6 +7345,15 @@ tabWidth = 4; usesTabs = 0; }; + 4487DB802AF8257200AFECAE /* fuzzilli */ = { + isa = PBXGroup; + children = ( + 4487DB812AF825C700AFECAE /* Fuzzilli.cpp */, + 4487DB822AF825C800AFECAE /* Fuzzilli.h */, + ); + path = fuzzilli; + sourceTree = ""; + }; 534E03521E53BBA900213F64 /* accessCase */ = { isa = PBXGroup; children = ( @@ -7617,6 +7691,10 @@ E30D06B729C6C3E80014CCE7 /* WasmBBQDisassembler.h */, FED5FA3329A0859C00798A7F /* WasmBBQJIT.cpp */, FED5FA3229A0859C00798A7F /* WasmBBQJIT.h */, + 4615E4672B5833FB001D4D53 /* WasmBBQJIT32_64.cpp */, + 4615E4692B5833FC001D4D53 /* WasmBBQJIT32_64.h */, + 4615E4682B5833FB001D4D53 /* WasmBBQJIT64.cpp */, + 4615E4662B5833FB001D4D53 /* WasmBBQJIT64.h */, 53CA73071EA533D80076049D /* WasmBBQPlan.cpp */, 53CA73081EA533D80076049D /* WasmBBQPlan.h */, AD4B1DF71DF244D70071AE32 /* WasmBinding.cpp */, @@ -7665,6 +7743,9 @@ F3D9C2352A426CB7006EE152 /* WasmIPIntGenerator.h */, F3D9C2362A426CB7006EE152 /* WasmIPIntPlan.cpp */, F3D9C2372A426CB7006EE152 /* WasmIPIntPlan.h */, + 53091F972ABE1F570076CBC4 /* WasmIPIntSlowPaths.cpp */, + 53091F982ABE1F570076CBC4 /* WasmIPIntSlowPaths.h */, + 53CBE6522ACF18C0009C083D /* WasmIPIntTierUpCounter.h */, 52FDABC22788076900C15B59 /* WasmIRGeneratorHelpers.h */, AD5C36DC1F688B5F000BCAAF /* WasmJS.h */, AD00659D1ECAC7FE000CA926 /* WasmLimits.h */, @@ -7725,6 +7806,7 @@ AD7438BE1E04579200FD0C2A /* WasmTypeDefinition.cpp */, AD7438BF1E04579200FD0C2A /* WasmTypeDefinition.h */, 30A5F403F11C4F599CD596D5 /* WasmTypeDefinitionInlines.h */, + 6571E9562AF3495A009DF224 /* WasmTZoneImpls.cpp */, 863FBC5725B093B800F6C930 /* WasmValueLocation.cpp */, 863FBC5825B093B900F6C930 /* WasmValueLocation.h */, 530FB3031E7A1146003C19DD /* WasmWorklist.cpp */, @@ -8093,6 +8175,7 @@ 0F4AE0421FE0D25400E20839 /* InferredValueInlines.h */, E178636C0D9BEEC300D74E75 /* InitializeThreading.cpp */, E178633F0D9BEC0000D74E75 /* InitializeThreading.h */, + E3D4FFE12AF21D96004ED359 /* InlineAttribute.h */, A7A8AF2C17ADB5F3005AB174 /* Int16Array.h */, A7A8AF2D17ADB5F3005AB174 /* Int32Array.h */, A7A8AF2B17ADB5F3005AB174 /* Int8Array.h */, @@ -8558,11 +8641,14 @@ 84925A9A22B30CBA00D1DFFF /* RegExpStringIteratorPrototype.cpp */, 84925A9B22B30CBA00D1DFFF /* RegExpStringIteratorPrototype.h */, 276B38A22A71D1B600252F4E /* RegExpStringIteratorPrototypeInlines.h */, + 65072B052B6C3DEA0065065C /* RegisterTZoneTypes.cpp */, + 65072B062B6C3DEA0065065C /* 
RegisterTZoneTypes.h */, FE9F3FBA2613C87C0069E89F /* ResourceExhaustion.cpp */, FE9F3FB82613C7880069E89F /* ResourceExhaustion.h */, 70B0A9D01A9B66200001306A /* RuntimeFlags.h */, 527773DD1AAF83AC00BDE7E8 /* RuntimeType.cpp */, 52C0611D1AA51E1B00B4ADBA /* RuntimeType.h */, + 6571E94C2AF2E769009DF224 /* RuntimeTZoneImpls.cpp */, 0F7700911402FF280078EB39 /* SamplingCounter.cpp */, 0F77008E1402FDD60078EB39 /* SamplingCounter.h */, 79D5CD581C1106A900CECA07 /* SamplingProfiler.cpp */, @@ -8597,6 +8683,8 @@ 860295FA26FB552C0078EB62 /* ShadowRealmPrototype.cpp */, 860295FF26FB552D0078EB62 /* ShadowRealmPrototype.h */, 276B388E2A71D18700252F4E /* ShadowRealmPrototypeInlines.h */, + FE2D0B352AE242AF00A071A7 /* SideDataRepository.cpp */, + FE2D0B362AE242AF00A071A7 /* SideDataRepository.h */, 0F2B66D617B6B5AB00A7AE3F /* SimpleTypedArrayController.cpp */, 0F2B66D717B6B5AB00A7AE3F /* SimpleTypedArrayController.h */, FE8C0311264A6910001A44AD /* SlowPathFunction.h */, @@ -8734,6 +8822,8 @@ 0F2D4DE019832D91007D4B19 /* TypeProfilerLog.h */, 0F2D4DE319832D91007D4B19 /* TypeSet.cpp */, 0F2D4DE419832D91007D4B19 /* TypeSet.h */, + 65F85BD32B7F067500D0AC74 /* TZoneInit.cpp */, + 65F85BD22B7F067500D0AC74 /* TZoneInit.h */, 0F5B4A321C84F0D600F1B17E /* UGPRPair.h */, A7A8AF3217ADB5F3005AB174 /* Uint16Array.h */, 866739D113BFDE710023D87C /* Uint16WithFraction.h */, @@ -8847,6 +8937,7 @@ 86704B8312DBA33700A9FE7B /* YarrPattern.h */, 86704B4012DB8A8100A9FE7B /* YarrSyntaxChecker.cpp */, 86704B4112DB8A8100A9FE7B /* YarrSyntaxChecker.h */, + 6571E9582AF35AA2009DF224 /* YarrTZoneImpls.cpp */, 659CDA591F67509800D3E53F /* YarrUnicodeProperties.cpp */, 659CDA5A1F67509800D3E53F /* YarrUnicodeProperties.h */, ); @@ -9138,6 +9229,7 @@ 0FE7211C193B9C590031F6ED /* DFGTransition.h */, 0F63943C15C75F14006A597C /* DFGTypeCheckHoistingPhase.cpp */, 0F63943D15C75F14006A597C /* DFGTypeCheckHoistingPhase.h */, + 6571E9542AF320AE009DF224 /* DFGTZoneImpls.cpp */, 0FBE0F6F16C1DB010082C5E8 /* DFGUnificationPhase.cpp */, 0FBE0F7016C1DB010082C5E8 /* DFGUnificationPhase.h */, 0F34B14716D4200E001CDA5A /* DFGUseKind.cpp */, @@ -9213,6 +9305,7 @@ 0FB1058A1675482E00F8AB6E /* ProfilerOSRExitSite.h */, 0F13912616771C30009CCB07 /* ProfilerProfiledBytecodes.cpp */, 0F13912716771C30009CCB07 /* ProfilerProfiledBytecodes.h */, + 6571E95A2AF9B98D009DF224 /* ProfilerTZoneImpls.cpp */, DC605B5B1CE26E9800593718 /* ProfilerUID.cpp */, DC605B5C1CE26E9800593718 /* ProfilerUID.h */, ); @@ -9259,15 +9352,14 @@ 0F6DB7EB1D617D0F00CDBF8E /* MacroAssemblerCodeRef.cpp */, 863B23DF0FC60E6200703AA4 /* MacroAssemblerCodeRef.h */, E380A76B1DCD7195000F89E6 /* MacroAssemblerHelpers.h */, - 86C568DE11A213EE0007F7F0 /* MacroAssemblerMIPS.h */, FE68C6351B90DDD90042BCB3 /* MacroAssemblerPrinter.cpp */, FE68C6361B90DDD90042BCB3 /* MacroAssemblerPrinter.h */, + FEC60FE92AE971FD003B7B31 /* MacroAssemblerRISCV64.cpp */, + FEC60FEC2AE9722C003B7B31 /* MacroAssemblerRISCV64.h */, 860161E10F3A83C100F84710 /* MacroAssemblerX86_64.h */, A7A4AE0717973B26005612B1 /* MacroAssemblerX86Common.cpp */, 860161E20F3A83C100F84710 /* MacroAssemblerX86Common.h */, 65860177185A8F5E00030EEE /* MaxFrameExtentForSlowPathCall.h */, - 86C568DF11A213EE0007F7F0 /* MIPSAssembler.h */, - 86C568DF11A213EE0007F7FF /* MIPSRegisters.h */, FE37C5292A9C3EA9003EE733 /* OSCheck.h */, E38DAB522A95D23A0050B7A8 /* PerfLog.cpp */, E38DAB512A95D23A0050B7A8 /* PerfLog.h */, @@ -9279,6 +9371,8 @@ FE10AAE91F44D510009DEDC5 /* ProbeStack.cpp */, FE10AAEA1F44D512009DEDC5 /* ProbeStack.h */, 9688CB140ED12B4E001D6499 /* 
RegisterInfo.h */, + FEC60FEF2AE9726E003B7B31 /* RISCV64Assembler.h */, + FEC60FEE2AE9726E003B7B31 /* RISCV64Registers.h */, 52CAEC732790B8F600DDBAAF /* SecureARM64EHashPins.cpp */, 52CAEC742790B8F600DDBAAF /* SecureARM64EHashPins.h */, 52CAEC722790B8F600DDBAAF /* SecureARM64EHashPinsInlines.h */, @@ -9329,6 +9423,8 @@ 0F64B2781A7957B2006E4E66 /* CallEdge.h */, 0F0B83AE14BCF71400885B4F /* CallLinkInfo.cpp */, 0F0B83AF14BCF71400885B4F /* CallLinkInfo.h */, + E35257672AF97DE300BD4754 /* CallLinkInfoBase.cpp */, + E35257682AF97DE300BD4754 /* CallLinkInfoBase.h */, 0F93329314CA7DC10085F3C6 /* CallLinkStatus.cpp */, 0F93329414CA7DC10085F3C6 /* CallLinkStatus.h */, 627673211B680C1E00FD9F2E /* CallMode.cpp */, @@ -9383,7 +9479,9 @@ 0F3AC753188E5EC80032029F /* ExitingJITType.h */, 0FB105821675480C00F8AB6E /* ExitKind.cpp */, 0FB105831675480C00F8AB6E /* ExitKind.h */, - 0F0B83AA14BCF5B900885B4F /* ExpressionRangeInfo.h */, + FEB21EA62AF7E547002C482C /* ExpressionInfo.cpp */, + FEB21EA82AF7E595002C482C /* ExpressionInfo.h */, + FEF934462B4DC61500DFA7F5 /* ExpressionInfoInlines.h */, 14A46809216FA534000D2B1A /* Fits.h */, 0F666EBF183566F900D017F1 /* FullBytecodeLiveness.h */, AD4252521E5D0F22009D2A97 /* FullCodeOrigin.cpp */, @@ -9429,6 +9527,7 @@ 0FB5467614F59AD1002C2989 /* LazyOperandValueProfile.h */, 530F0A972AE0606900A0EEC0 /* LazyValueProfile.cpp */, 530F0A962AE0606900A0EEC0 /* LazyValueProfile.h */, + FEC503FD2B51E09700176A93 /* LineColumn.h */, E3637EE8236E56B00096BD0A /* LinkTimeConstant.cpp */, E3637EE7236E56B00096BD0A /* LinkTimeConstant.h */, 53FA2AE21CF380390022711D /* LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp */, @@ -10026,7 +10125,6 @@ FEF3476420362B2300B7C0EF /* generate_offset_extractor.rb */, FE0DA1022429417F00A884A6 /* generate_settings_extractor.rb */, FEF3475E20362B1F00B7C0EF /* instructions.rb */, - FEF3475C20362B1E00B7C0EF /* mips.rb */, FEF3476120362B2100B7C0EF /* offsets.rb */, FEF3475F20362B2000B7C0EF /* opt.rb */, FEF3475220362B1B00B7C0EF /* parser.rb */, @@ -10377,6 +10475,7 @@ 62EC9BB71B7EB07C00303AD1 /* CallFrameShuffleData.h in Headers */, 62D755D71B84FB4A001801FA /* CallFrameShuffler.h in Headers */, 0F0B83B114BCF71800885B4F /* CallLinkInfo.h in Headers */, + E352576A2AF97DE400BD4754 /* CallLinkInfoBase.h in Headers */, 0F93329E14CA7DC50085F3C6 /* CallLinkStatus.h in Headers */, 627673241B680C1E00FD9F2E /* CallMode.h in Headers */, 0F3B7E2B19A11B8000D9BC56 /* CallVariant.h in Headers */, @@ -10708,7 +10807,8 @@ 0F44A7B120BF68C90022B171 /* ExitingInlineKind.h in Headers */, 0F3AC754188E5EC80032029F /* ExitingJITType.h in Headers */, 0FB105861675481200F8AB6E /* ExitKind.h in Headers */, - 0F0B83AB14BCF5BB00885B4F /* ExpressionRangeInfo.h in Headers */, + FEB21EA92AF7E596002C482C /* ExpressionInfo.h in Headers */, + FEF934472B4DC61500DFA7F5 /* ExpressionInfoInlines.h in Headers */, 5267CF82249316B10022BF6D /* FastJITPermissions.h in Headers */, 0FEC3C571F33A45300F59B6C /* FastMallocAlignedMemoryAllocator.h in Headers */, CECFAD372372DAD400291599 /* FileBasedFuzzerAgent.h in Headers */, @@ -10905,6 +11005,7 @@ A5840E21187B7B8600843B10 /* InjectedScriptModule.h in Headers */, 9959E9321BD18279001AA413 /* inline-and-minify-stylesheets-and-scripts.py in Headers */, 7905BB691D12050E0019FE57 /* InlineAccess.h in Headers */, + E3D4FFE22AF21D96004ED359 /* InlineAttribute.h in Headers */, 0FF9CE741B9CD6D0004EDCA6 /* InlineCacheCompiler.h in Headers */, 148A7BF01B82975A002D9157 /* InlineCallFrame.h in Headers */, 0F24E55617F0B71C00ABB217 /* InlineCallFrameSet.h in Headers 
*/, @@ -11277,6 +11378,7 @@ 99DA00B01BD5994E00F4575C /* lazywriter.py in Headers */, 1409ECBF225E177400BEDD54 /* LeafExecutable.h in Headers */, BC18C4310E16F5CD00B34460 /* Lexer.h in Headers */, + FEC503FE2B51E09700176A93 /* LineColumn.h in Headers */, 86D3B3C310159D7F002865E7 /* LinkBuffer.h in Headers */, E3637EE9236E56B00096BD0A /* LinkTimeConstant.h in Headers */, A7E2EA6B0FB460CF00601F06 /* LiteralParser.h in Headers */, @@ -11303,7 +11406,6 @@ 86ADD1460FDDEA980006EEC2 /* MacroAssemblerARMv7.h in Headers */, 863B23E00FC6118900703AA4 /* MacroAssemblerCodeRef.h in Headers */, E32AB2441DCD75F400D7533A /* MacroAssemblerHelpers.h in Headers */, - 86C568E111A213EE0007F7F0 /* MacroAssemblerMIPS.h in Headers */, FE68C6371B90DE040042BCB3 /* MacroAssemblerPrinter.h in Headers */, 860161E50F3A83C100F84710 /* MacroAssemblerX86_64.h in Headers */, 860161E60F3A83C100F84710 /* MacroAssemblerX86Common.h in Headers */, @@ -11338,8 +11440,6 @@ 0FB5467B14F5C7E1002C2989 /* MethodOfGettingAValueProfile.h in Headers */, 7C008CE7187631B600955C24 /* Microtask.h in Headers */, FE2A87601F02381600EB31B2 /* MinimumReservedZoneSize.h in Headers */, - 86C568E211A213EE0007F7F0 /* MIPSAssembler.h in Headers */, - 86C568E211A213EE0007F7FF /* MIPSRegisters.h in Headers */, C4703CD7192844CC0013FBEA /* models.py in Headers */, E3794E761B77EB97005543AE /* ModuleAnalyzer.h in Headers */, 9F63434577274FAFB9336C38 /* ModuleNamespaceAccessCase.h in Headers */, @@ -11492,6 +11592,7 @@ 623A37EC1B87A7C000754209 /* RegisterMap.h in Headers */, 0FC314121814559100033232 /* RegisterSet.h in Headers */, 0FD0E5F01E46BF250006AB08 /* RegisterState.h in Headers */, + 65072B082B6C3DEA0065065C /* RegisterTZoneTypes.h in Headers */, A57D23EE1891B5540031C7FA /* RegularExpression.h in Headers */, 0F7CF94F1DBEEE880098CC12 /* ReleaseHeapAccessScope.h in Headers */, 99D6A1161BEAD34D00E25C37 /* RemoteAutomationTarget.h in Headers */, @@ -11545,6 +11646,7 @@ 276B389B2A71D1A900252F4E /* ShadowRealmObjectInlines.h in Headers */, 8602960526FB552D0078EB62 /* ShadowRealmPrototype.h in Headers */, 276B38922A71D18800252F4E /* ShadowRealmPrototypeInlines.h in Headers */, + FE2D0B382AE242B000A071A7 /* SideDataRepository.h in Headers */, 4BC18E5628FDE6C800ECD68D /* SIMDInfo.h in Headers */, E379B59029834EC5007C4C0E /* SIMDShuffle.h in Headers */, 0F4D8C781FCA3CFA001D32AC /* SimpleMarkingConstraint.h in Headers */, @@ -11678,6 +11780,7 @@ 52C952B719A289850069B386 /* TypeProfiler.h in Headers */, 0F2D4DEC19832DC4007D4B19 /* TypeProfilerLog.h in Headers */, 0F2D4DF019832DD6007D4B19 /* TypeSet.h in Headers */, + 65F85BD42B7F067600D0AC74 /* TZoneInit.h in Headers */, 0F5B4A331C84F0D600F1B17E /* UGPRPair.h in Headers */, A7A8AF4117ADB5F3005AB174 /* Uint16Array.h in Headers */, 866739D313BFDE710023D87C /* Uint16WithFraction.h in Headers */, @@ -11749,6 +11852,8 @@ AD5C36E21F699EC0000BCAAF /* WasmInstance.h in Headers */, F3D9C2392A426CB8006EE152 /* WasmIPIntGenerator.h in Headers */, F3D9C23B2A426CB8006EE152 /* WasmIPIntPlan.h in Headers */, + 53091F9A2ABE1F570076CBC4 /* WasmIPIntSlowPaths.h in Headers */, + 53CBE6532ACF18C0009C083D /* WasmIPIntTierUpCounter.h in Headers */, 52FDABC32788076B00C15B59 /* WasmIRGeneratorHelpers.h in Headers */, AD5C36DD1F688B65000BCAAF /* WasmJS.h in Headers */, AD00659E1ECAC812000CA926 /* WasmLimits.h in Headers */, @@ -12018,6 +12123,30 @@ productReference = 14BD688E215191310050DAFF /* JSCLLIntSettingsExtractor */; productType = "com.apple.product-type.tool"; }; +
44F93E012AE71F5300FFA37C /* libJavaScriptCore */ = { + isa = PBXNativeTarget; + buildConfigurationList = 44F93E082AE71F5600FFA37C /* Build configuration list for PBXNativeTarget "libJavaScriptCore" */; + buildPhases = ( + F4CDF3C927E9147500191928 /* Copy Profiling Data */, + 44F93DFE2AE71F5300FFA37C /* Sources */, + 44F93DFF2AE71F5300FFA37C /* Frameworks */, + ); + buildRules = ( + DD284676291A27C90009A61D /* PBXBuildRule */, + DD41FA7D27CDA6FE00394D95 /* PBXBuildRule */, + 535E08C222545AC800DF00CA /* PBXBuildRule */, + ); + dependencies = ( + DD4ABD8429A6D61A00530828 /* PBXTargetDependency */, + DD4ABD8229A6D61A00530828 /* PBXTargetDependency */, + 65FB3F7E09D11EF300F49DEB /* PBXTargetDependency */, + 53B4BD141F68C2AA00D2BEA3 /* PBXTargetDependency */, + ); + name = libJavaScriptCore; + productName = JavaScriptCore; + productReference = 44F93E022AE71F5400FFA37C /* libJavaScriptCore.a */; + productType = "com.apple.product-type.library.static"; + }; 52CD0F592242F569004A18A5 /* testdfg */ = { isa = PBXNativeTarget; buildConfigurationList = 52CD0F5F2242F569004A18A5 /* Build configuration list for PBXNativeTarget "testdfg" */; @@ -12078,7 +12207,6 @@ buildConfigurationList = 149C275D08902AFE008A9EFC /* Build configuration list for PBXNativeTarget "JavaScriptCore" */; buildPhases = ( A53F1ABF18C90F8B0072EB6D /* Resources */, - F4CDF3C927E9147500191928 /* Copy Profiling Data */, 932F5B3F0822A1C700736975 /* Headers */, 932F5B910822A1C700736975 /* Sources */, 932F5BD20822A1C700736975 /* Frameworks */, @@ -12092,15 +12220,9 @@ DDA8F1542A15BEEA00BE8D11 /* Work around rdar://109484516 */, ); buildRules = ( - DD284676291A27C90009A61D /* PBXBuildRule */, - DD41FA7D27CDA6FE00394D95 /* PBXBuildRule */, - 535E08C222545AC800DF00CA /* PBXBuildRule */, ); dependencies = ( - DD4ABD8429A6D61A00530828 /* PBXTargetDependency */, - DD4ABD8229A6D61A00530828 /* PBXTargetDependency */, - 65FB3F7E09D11EF300F49DEB /* PBXTargetDependency */, - 53B4BD141F68C2AA00D2BEA3 /* PBXTargetDependency */, + 44F93E112AE7200100FFA37C /* PBXTargetDependency */, ); name = JavaScriptCore; productInstallPath = "${SYSTEM_LIBRARY_DIR}/Frameworks/WebKit.framework/Versions/A/Frameworks"; @@ -12112,6 +12234,7 @@ isa = PBXNativeTarget; buildConfigurationList = 149C276708902AFE008A9EFC /* Build configuration list for PBXNativeTarget "jsc" */; buildPhases = ( + 446997032AFED699008B930C /* Product Dependencies */, E3D6F6ED25D78C0100C20EB4 /* Generate Entitlements */, 932F5BDC0822A1C700736975 /* Sources */, 932F5BDE0822A1C700736975 /* Frameworks */, @@ -12155,6 +12278,9 @@ LastSwiftUpdateCheck = 0700; LastUpgradeCheck = 1140; TargetAttributes = { + 44F93E012AE71F5300FFA37C = { + CreatedOnToolsVersion = 15.0; + }; 5325BDBF21DFF2B100A0DEE1 = { CreatedOnToolsVersion = 10.1; ProvisioningStyle = Automatic; @@ -12175,6 +12301,7 @@ projectRoot = ""; targets = ( 932F5BE30822A1C700736975 /* All */, + 44F93E012AE71F5300FFA37C /* libJavaScriptCore */, 932F5B3E0822A1C700736975 /* JavaScriptCore */, 65FB3F6609D11E9100F49DEB /* Derived Sources */, 14BD6881215191310050DAFF /* JSCLLIntSettingsExtractor */, @@ -12828,31 +12955,7 @@ ); runOnlyForDeploymentPostprocessing = 0; }; - 52CD0F5A2242F569004A18A5 /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 52CD0F682242F71C004A18A5 /* testdfg.cpp in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; - 651122FA14046A4C002B101D /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 6511230714046B0A002B101D /* testRegExp.cpp 
in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; - 79281BBE20B62B3E002E2A60 /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 7954BE0B20B62D64009BC83A /* testmem.mm in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; - 932F5B910822A1C700736975 /* Sources */ = { + 44F93DFE2AE71F5300FFA37C /* Sources */ = { isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( @@ -13042,16 +13145,53 @@ 538F15EA268FBBB600D601C4 /* UnifiedSource154.cpp in Sources */, 538F15EF268FBBB600D601C4 /* UnifiedSource155.cpp in Sources */, E3C091E929B07D4C00CD6D97 /* WasmBBQJIT.cpp in Sources */, + 4615E46A2B5849F4001D4D53 /* WasmBBQJIT32_64.cpp in Sources */, + 4615E46B2B5849F4001D4D53 /* WasmBBQJIT64.cpp in Sources */, + 642D20D52B476A2E0030545E /* WasmIPIntSlowPaths.cpp in Sources */, + 642D20D42B476A250030545E /* WasmSlowPaths.cpp in Sources */, E31135C9281B5B0000C1A4A9 /* ZydisFormatterATT.c in Sources */, E31135CA281B5B0300C1A4A9 /* ZydisFormatterBase.c in Sources */, E31135CB281B5B0500C1A4A9 /* ZydisFormatterIntel.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; + 52CD0F5A2242F569004A18A5 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 52CD0F682242F71C004A18A5 /* testdfg.cpp in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 651122FA14046A4C002B101D /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 6511230714046B0A002B101D /* testRegExp.cpp in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 79281BBE20B62B3E002E2A60 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 7954BE0B20B62D64009BC83A /* testmem.mm in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 932F5B910822A1C700736975 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 44F93E0E2AE71FBD00FFA37C /* JavaScriptCoreFramework.cpp in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; 932F5BDC0822A1C700736975 /* Sources */ = { isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 4487DB832AF825C800AFECAE /* Fuzzilli.cpp in Sources */, 932F5BDD0822A1C700736975 /* jsc.cpp in Sources */, ); runOnlyForDeploymentPostprocessing = 0; @@ -13147,6 +13287,11 @@ target = 65FB3F6609D11E9100F49DEB /* Derived Sources */; targetProxy = 14D9D9D9218462B5009126C2 /* PBXContainerItemProxy */; }; + 44F93E112AE7200100FFA37C /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 44F93E012AE71F5300FFA37C /* libJavaScriptCore */; + targetProxy = 44F93E102AE7200100FFA37C /* PBXContainerItemProxy */; + }; 52CD0F662242F5A3004A18A5 /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = 52CD0F592242F569004A18A5 /* testdfg */; @@ -13525,6 +13670,34 @@ }; name = Production; }; + 44F93E092AE71F5600FFA37C /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 44F93DFD2AE71EBD00FFA37C /* libJavaScriptCore.xcconfig */; + buildSettings = { + }; + name = Debug; + }; + 44F93E0A2AE71F5600FFA37C /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 44F93DFD2AE71EBD00FFA37C /* libJavaScriptCore.xcconfig */; + buildSettings = { + }; + name = Release; + }; + 44F93E0B2AE71F5600FFA37C /* Profiling */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 44F93DFD2AE71EBD00FFA37C /* libJavaScriptCore.xcconfig */; + buildSettings = { + }; + name = Profiling; + }; + 
44F93E0C2AE71F5600FFA37C /* Production */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 44F93DFD2AE71EBD00FFA37C /* libJavaScriptCore.xcconfig */; + buildSettings = { + }; + name = Production; + }; 52CD0F602242F569004A18A5 /* Debug */ = { isa = XCBuildConfiguration; baseConfigurationReference = FEE0A12229FE250400CED5E4 /* TestExecutable.xcconfig */; @@ -13951,6 +14124,17 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Production; }; + 44F93E082AE71F5600FFA37C /* Build configuration list for PBXNativeTarget "libJavaScriptCore" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 44F93E092AE71F5600FFA37C /* Debug */, + 44F93E0A2AE71F5600FFA37C /* Release */, + 44F93E0B2AE71F5600FFA37C /* Profiling */, + 44F93E0C2AE71F5600FFA37C /* Production */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Production; + }; 52CD0F5F2242F569004A18A5 /* Build configuration list for PBXNativeTarget "testdfg" */ = { isa = XCConfigurationList; buildConfigurations = ( diff --git a/vendor/webkit/Source/JavaScriptCore/builtins/DatePrototype.js b/vendor/webkit/Source/JavaScriptCore/JavaScriptCoreFramework.cpp similarity index 91% rename from vendor/webkit/Source/JavaScriptCore/builtins/DatePrototype.js rename to vendor/webkit/Source/JavaScriptCore/JavaScriptCoreFramework.cpp index 8c326593..214e7ec1 100644 --- a/vendor/webkit/Source/JavaScriptCore/builtins/DatePrototype.js +++ b/vendor/webkit/Source/JavaScriptCore/JavaScriptCoreFramework.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Andy VanWagoner . + * Copyright (C) 2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,3 +23,4 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +// Used to build JavaScriptCore.framework and link libJavaScriptCore.a. diff --git a/vendor/webkit/Source/JavaScriptCore/KeywordLookupGenerator.py b/vendor/webkit/Source/JavaScriptCore/KeywordLookupGenerator.py index 84132d8a..91aef406 100644 --- a/vendor/webkit/Source/JavaScriptCore/KeywordLookupGenerator.py +++ b/vendor/webkit/Source/JavaScriptCore/KeywordLookupGenerator.py @@ -161,7 +161,7 @@ def printSubTreeAsC(self, typeName, indent): base = "code" if baseIndex > 0: base = "code + %d" % baseIndex - comparison = ("COMPARE_%d%sS(%s, " % (len(test), typeName, base)) + ", ".join(test) + ")" + comparison = ("compareCharacters(%s, " % (base,)) + ", ".join(test) + ")" if itemCount == 0: print(str + "if (" + comparison + ") {") else: @@ -220,88 +220,8 @@ def printAsC(self): print("// This file was generated by KeywordLookupGenerator.py. 
Do not edit.") print(""" #include +#include -#if CPU(NEEDS_ALIGNED_ACCESS) - -#define COMPARE_2CHARS(address, char1, char2) \\ - (((address)[0] == char1) && ((address)[1] == char2)) -#define COMPARE_4CHARS(address, char1, char2, char3, char4) \\ - (COMPARE_2CHARS(address, char1, char2) && COMPARE_2CHARS((address) + 2, char3, char4)) -#define COMPARE_2UCHARS(address, char1, char2) \\ - (((address)[0] == char1) && ((address)[1] == char2)) -#define COMPARE_4UCHARS(address, char1, char2, char3, char4) \\ - (COMPARE_2UCHARS(address, char1, char2) && COMPARE_2UCHARS((address) + 2, char3, char4)) - -#else // CPU(NEEDS_ALIGNED_ACCESS) - -#if CPU(BIG_ENDIAN) - -#define CHARPAIR_TOUINT16(a, b) \\ - ((((uint16_t)(a)) << 8) + (uint16_t)(b)) -#define CHARQUAD_TOUINT32(a, b, c, d) \\ - ((((uint32_t)(CHARPAIR_TOUINT16(a, b))) << 16) + CHARPAIR_TOUINT16(c, d)) -#define UCHARPAIR_TOUINT32(a, b) \\ - ((((uint32_t)(a)) << 16) + (uint32_t)(b)) -#define UCHARQUAD_TOUINT64(a, b, c, d) \\ - ((((uint64_t)(UCHARQUAD_TOUINT64(a, b))) << 32) + UCHARPAIR_TOUINT32(c, d)) - -#else // CPU(BIG_ENDIAN) - -#define CHARPAIR_TOUINT16(a, b) \\ - ((((uint16_t)(b)) << 8) + (uint16_t)(a)) -#define CHARQUAD_TOUINT32(a, b, c, d) \\ - ((((uint32_t)(CHARPAIR_TOUINT16(c, d))) << 16) + CHARPAIR_TOUINT16(a, b)) -#define UCHARPAIR_TOUINT32(a, b) \\ - ((((uint32_t)(b)) << 16) + (uint32_t)(a)) -#define UCHARQUAD_TOUINT64(a, b, c, d) \\ - ((((uint64_t)(UCHARPAIR_TOUINT32(c, d))) << 32) + UCHARPAIR_TOUINT32(a, b)) - -#endif // CPU(BIG_ENDIAN) - - -#define COMPARE_2CHARS(address, char1, char2) \\ - ((reinterpret_cast_ptr(address))[0] == CHARPAIR_TOUINT16(char1, char2)) -#define COMPARE_2UCHARS(address, char1, char2) \\ - ((reinterpret_cast_ptr(address))[0] == UCHARPAIR_TOUINT32(char1, char2)) - -#if CPU(X86_64) - -#define COMPARE_4CHARS(address, char1, char2, char3, char4) \\ - ((reinterpret_cast_ptr(address))[0] == CHARQUAD_TOUINT32(char1, char2, char3, char4)) -#define COMPARE_4UCHARS(address, char1, char2, char3, char4) \\ - ((reinterpret_cast_ptr(address))[0] == UCHARQUAD_TOUINT64(char1, char2, char3, char4)) - -#else // CPU(X86_64) - -#define COMPARE_4CHARS(address, char1, char2, char3, char4) \\ - (COMPARE_2CHARS(address, char1, char2) && COMPARE_2CHARS((address) + 2, char3, char4)) -#define COMPARE_4UCHARS(address, char1, char2, char3, char4) \\ - (COMPARE_2UCHARS(address, char1, char2) && COMPARE_2UCHARS((address) + 2, char3, char4)) - -#endif // CPU(X86_64) - -#endif // CPU(NEEDS_ALIGNED_ACCESS) - -#define COMPARE_3CHARS(address, char1, char2, char3) \\ - (COMPARE_2CHARS(address, char1, char2) && ((address)[2] == (char3))) -#define COMPARE_3UCHARS(address, char1, char2, char3) \\ - (COMPARE_2UCHARS(address, char1, char2) && ((address)[2] == (char3))) -#define COMPARE_5CHARS(address, char1, char2, char3, char4, char5) \\ - (COMPARE_4CHARS(address, char1, char2, char3, char4) && ((address)[4] == (char5))) -#define COMPARE_5UCHARS(address, char1, char2, char3, char4, char5) \\ - (COMPARE_4UCHARS(address, char1, char2, char3, char4) && ((address)[4] == (char5))) -#define COMPARE_6CHARS(address, char1, char2, char3, char4, char5, char6) \\ - (COMPARE_4CHARS(address, char1, char2, char3, char4) && COMPARE_2CHARS(address + 4, char5, char6)) -#define COMPARE_6UCHARS(address, char1, char2, char3, char4, char5, char6) \\ - (COMPARE_4UCHARS(address, char1, char2, char3, char4) && COMPARE_2UCHARS(address + 4, char5, char6)) -#define COMPARE_7CHARS(address, char1, char2, char3, char4, char5, char6, char7) \\ - (COMPARE_4CHARS(address, char1, 
char2, char3, char4) && COMPARE_4CHARS(address + 3, char4, char5, char6, char7)) -#define COMPARE_7UCHARS(address, char1, char2, char3, char4, char5, char6, char7) \\ - (COMPARE_4UCHARS(address, char1, char2, char3, char4) && COMPARE_4UCHARS(address + 3, char4, char5, char6, char7)) -#define COMPARE_8CHARS(address, char1, char2, char3, char4, char5, char6, char7, char8) \\ - (COMPARE_4CHARS(address, char1, char2, char3, char4) && COMPARE_4CHARS(address + 4, char5, char6, char7, char8)) -#define COMPARE_8UCHARS(address, char1, char2, char3, char4, char5, char6, char7, char8) \\ - (COMPARE_4UCHARS(address, char1, char2, char3, char4) && COMPARE_4UCHARS(address + 4, char5, char6, char7, char8)) """) trie.printAsC() diff --git a/vendor/webkit/Source/JavaScriptCore/Scripts/copy-profiling-data.sh b/vendor/webkit/Source/JavaScriptCore/Scripts/copy-profiling-data.sh index 016713b7..7c01755d 100755 --- a/vendor/webkit/Source/JavaScriptCore/Scripts/copy-profiling-data.sh +++ b/vendor/webkit/Source/JavaScriptCore/Scripts/copy-profiling-data.sh @@ -1,14 +1,21 @@ -#!/bin/sh +#!/bin/sh -e # Decompresses and copies PGO profiles from WebKitAdditions to a derived folder. if [ "${CLANG_USE_OPTIMIZATION_PROFILE}" = YES ]; then - compression_tool -v -decode -i "${SCRIPT_INPUT_FILE_0}" -o "${SCRIPT_OUTPUT_FILE_0}" && exit - if [ "${CONFIGURATION}" = Production ]; then - echo "error: ${SCRIPT_INPUT_FILE_0} failed to extract" - exit 1 + eval $(stat -s "${SCRIPT_INPUT_FILE_0}") + if [ ${st_size} -lt 1024 ]; then + if [ "${CONFIGURATION}" = Production ]; then + echo "error: ${SCRIPT_INPUT_FILE_0} is <1KB, is it a Git LFS stub?"\ + "Ensure this file was checked out on a machine with git-lfs installed." + exit 1 + else + echo "warning: ${SCRIPT_INPUT_FILE_0} is <1KB, is it a Git LFS stub?"\ + "To build with production optimizations, ensure this file was checked out on a machine with git-lfs installed."\ + "Falling back to stub profile data." 
+ cp -vf "${SCRIPT_INPUT_FILE_1}" "${SCRIPT_OUTPUT_FILE_0}" + fi else - echo "warning: ${SCRIPT_INPUT_FILE_0} failed to extract, falling back to stub profile data" - cp -vf "${SCRIPT_INPUT_FILE_1}" "${SCRIPT_OUTPUT_FILE_0}" + compression_tool -v -decode -i "${SCRIPT_INPUT_FILE_0}" -o "${SCRIPT_OUTPUT_FILE_0}" fi fi diff --git a/vendor/webkit/Source/JavaScriptCore/Scripts/process-entitlements.sh b/vendor/webkit/Source/JavaScriptCore/Scripts/process-entitlements.sh index b5b2658c..51abc995 100755 --- a/vendor/webkit/Source/JavaScriptCore/Scripts/process-entitlements.sh +++ b/vendor/webkit/Source/JavaScriptCore/Scripts/process-entitlements.sh @@ -17,6 +17,7 @@ function mac_process_jsc_entitlements() if (( "${TARGET_MAC_OS_X_VERSION_MAJOR}" >= 110000 )) then plistbuddy Add :com.apple.security.cs.jit-write-allowlist bool YES + plistbuddy Add :com.apple.developer.kernel.extended-virtual-addressing bool YES fi if (( "${TARGET_MAC_OS_X_VERSION_MAJOR}" >= 120000 )) @@ -37,6 +38,7 @@ function mac_process_testapi_entitlements() if (( "${TARGET_MAC_OS_X_VERSION_MAJOR}" >= 110000 )) then plistbuddy Add :com.apple.security.cs.jit-write-allowlist bool YES + plistbuddy Add :com.apple.developer.kernel.extended-virtual-addressing bool YES fi if (( "${TARGET_MAC_OS_X_VERSION_MAJOR}" >= 120000 )) @@ -60,6 +62,7 @@ function maccatalyst_process_jsc_entitlements() if (( "${TARGET_MAC_OS_X_VERSION_MAJOR}" >= 110000 )) then plistbuddy Add :com.apple.security.cs.jit-write-allowlist bool YES + plistbuddy Add :com.apple.developer.kernel.extended-virtual-addressing bool YES fi fi @@ -78,6 +81,7 @@ function maccatalyst_process_testapi_entitlements() if (( "${TARGET_MAC_OS_X_VERSION_MAJOR}" >= 110000 )) then plistbuddy Add :com.apple.security.cs.jit-write-allowlist bool YES + plistbuddy Add :com.apple.developer.kernel.extended-virtual-addressing bool YES fi if (( "${TARGET_MAC_OS_X_VERSION_MAJOR}" >= 120000 )) @@ -95,6 +99,7 @@ function ios_family_process_jsc_entitlements() { plistbuddy Add :com.apple.private.verified-jit bool YES plistbuddy Add :dynamic-codesigning bool YES + plistbuddy Add :com.apple.developer.kernel.extended-virtual-addressing bool YES } function ios_family_process_testapi_entitlements() diff --git a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Combined.js-result b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Combined.js-result index a7f75e69..7357b6e8 100644 --- a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Combined.js-result +++ b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Combined.js-result @@ -38,6 +38,7 @@ class VM; enum class ConstructAbility : uint8_t; enum class ConstructorKind : uint8_t; enum class ImplementationVisibility : uint8_t; +enum class InlineAttribute : uint8_t; } namespace JSC { @@ -52,11 +53,13 @@ extern const int s_builtinPromiseRejectPromiseCodeLength; extern const JSC::ConstructAbility s_builtinPromiseRejectPromiseCodeConstructAbility; extern const JSC::ConstructorKind s_builtinPromiseRejectPromiseCodeConstructorKind; extern const JSC::ImplementationVisibility s_builtinPromiseRejectPromiseCodeImplementationVisibility; +extern const JSC::InlineAttribute s_builtinPromiseRejectPromiseCodeInlineAttribute; extern const char* const s_builtinPromiseFulfillPromiseCode; extern const int s_builtinPromiseFulfillPromiseCodeLength; extern const JSC::ConstructAbility 
s_builtinPromiseFulfillPromiseCodeConstructAbility; extern const JSC::ConstructorKind s_builtinPromiseFulfillPromiseCodeConstructorKind; extern const JSC::ImplementationVisibility s_builtinPromiseFulfillPromiseCodeImplementationVisibility; +extern const JSC::InlineAttribute s_builtinPromiseFulfillPromiseCodeInlineAttribute; #define JSC_FOREACH_BUILTINPROMISE_BUILTIN_DATA(macro) \ macro(rejectPromise, builtinPromiseRejectPromise, 2) \ @@ -132,6 +135,7 @@ const unsigned s_JSCCombinedCodeLength = 673; const JSC::ConstructAbility s_builtinPromiseFulfillPromiseCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_builtinPromiseFulfillPromiseCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_builtinPromiseFulfillPromiseCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_builtinPromiseFulfillPromiseCodeInlineAttribute = JSC::InlineAttribute::None; const int s_builtinPromiseFulfillPromiseCodeLength = 336; static const JSC::Intrinsic s_builtinPromiseFulfillPromiseCodeIntrinsic = JSC::NoIntrinsic; const char* const s_builtinPromiseFulfillPromiseCode = @@ -141,6 +145,7 @@ s_JSCCombinedCode + 0 const JSC::ConstructAbility s_builtinPromiseRejectPromiseCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_builtinPromiseRejectPromiseCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_builtinPromiseRejectPromiseCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_builtinPromiseRejectPromiseCodeInlineAttribute = JSC::InlineAttribute::None; const int s_builtinPromiseRejectPromiseCodeLength = 337; static const JSC::Intrinsic s_builtinPromiseRejectPromiseCodeIntrinsic = JSC::NoIntrinsic; const char* const s_builtinPromiseRejectPromiseCode = diff --git a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Separate.js-result b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Separate.js-result index 378542d1..06719f90 100644 --- a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Separate.js-result +++ b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Separate.js-result @@ -45,11 +45,13 @@ extern const int s_builtinPromiseRejectPromiseCodeLength; extern const JSC::ConstructAbility s_builtinPromiseRejectPromiseCodeConstructAbility; extern const JSC::ConstructorKind s_builtinPromiseRejectPromiseCodeConstructorKind; extern const JSC::ImplementationVisibility s_builtinPromiseRejectPromiseCodeImplementationVisibility; +extern const JSC::InlineAttribute s_builtinPromiseRejectPromiseCodeInlineAttribute; extern const char* const s_builtinPromiseFulfillPromiseCode; extern const int s_builtinPromiseFulfillPromiseCodeLength; extern const JSC::ConstructAbility s_builtinPromiseFulfillPromiseCodeConstructAbility; extern const JSC::ConstructorKind s_builtinPromiseFulfillPromiseCodeConstructorKind; extern const JSC::ImplementationVisibility s_builtinPromiseFulfillPromiseCodeImplementationVisibility; +extern const JSC::InlineAttribute s_builtinPromiseFulfillPromiseCodeInlineAttribute; #define JSC_FOREACH_BUILTIN_PROMISE_BUILTIN_DATA(macro) \ macro(rejectPromise, builtinPromiseRejectPromise, 2) \ @@ -121,6 +123,7 @@ namespace JSC { const JSC::ConstructAbility 
s_builtinPromiseRejectPromiseCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_builtinPromiseRejectPromiseCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_builtinPromiseRejectPromiseCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_builtinPromiseRejectPromiseCodeInlineAttribute = JSC::InlineAttribute::None; const int s_builtinPromiseRejectPromiseCodeLength = 337; static const JSC::Intrinsic s_builtinPromiseRejectPromiseCodeIntrinsic = JSC::NoIntrinsic; const char* const s_builtinPromiseRejectPromiseCode = @@ -141,6 +144,7 @@ const char* const s_builtinPromiseRejectPromiseCode = const JSC::ConstructAbility s_builtinPromiseFulfillPromiseCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_builtinPromiseFulfillPromiseCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_builtinPromiseFulfillPromiseCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_builtinPromiseFulfillPromiseCodeInlineAttribute = JSC::InlineAttribute::None; const int s_builtinPromiseFulfillPromiseCodeLength = 336; static const JSC::Intrinsic s_builtinPromiseFulfillPromiseCodeIntrinsic = JSC::NoIntrinsic; const char* const s_builtinPromiseFulfillPromiseCode = diff --git a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Combined.js-result b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Combined.js-result index 88b1cf8e..d310b675 100644 --- a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Combined.js-result +++ b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Combined.js-result @@ -38,6 +38,7 @@ class VM; enum class ConstructAbility : uint8_t; enum class ConstructorKind : uint8_t; enum class ImplementationVisibility : uint8_t; +enum class InlineAttribute : uint8_t; } namespace JSC { @@ -52,21 +53,25 @@ extern const int s_builtinPrototypeEveryCodeLength; extern const JSC::ConstructAbility s_builtinPrototypeEveryCodeConstructAbility; extern const JSC::ConstructorKind s_builtinPrototypeEveryCodeConstructorKind; extern const JSC::ImplementationVisibility s_builtinPrototypeEveryCodeImplementationVisibility; +extern const JSC::InlineAttribute s_builtinPrototypeEveryCodeInlineAttribute; extern const char* const s_builtinPrototypeForEachCode; extern const int s_builtinPrototypeForEachCodeLength; extern const JSC::ConstructAbility s_builtinPrototypeForEachCodeConstructAbility; extern const JSC::ConstructorKind s_builtinPrototypeForEachCodeConstructorKind; extern const JSC::ImplementationVisibility s_builtinPrototypeForEachCodeImplementationVisibility; +extern const JSC::InlineAttribute s_builtinPrototypeForEachCodeInlineAttribute; extern const char* const s_builtinPrototypeMatchCode; extern const int s_builtinPrototypeMatchCodeLength; extern const JSC::ConstructAbility s_builtinPrototypeMatchCodeConstructAbility; extern const JSC::ConstructorKind s_builtinPrototypeMatchCodeConstructorKind; extern const JSC::ImplementationVisibility s_builtinPrototypeMatchCodeImplementationVisibility; +extern const JSC::InlineAttribute s_builtinPrototypeMatchCodeInlineAttribute; extern const char* const s_builtinPrototypeTestCode; extern const int s_builtinPrototypeTestCodeLength; extern const 
JSC::ConstructAbility s_builtinPrototypeTestCodeConstructAbility; extern const JSC::ConstructorKind s_builtinPrototypeTestCodeConstructorKind; extern const JSC::ImplementationVisibility s_builtinPrototypeTestCodeImplementationVisibility; +extern const JSC::InlineAttribute s_builtinPrototypeTestCodeInlineAttribute; #define JSC_FOREACH_BUILTINPROTOTYPE_BUILTIN_DATA(macro) \ macro(every, builtinPrototypeEvery, 1) \ @@ -148,6 +153,7 @@ const unsigned s_JSCCombinedCodeLength = 3198; const JSC::ConstructAbility s_builtinPrototypeEveryCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_builtinPrototypeEveryCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_builtinPrototypeEveryCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_builtinPrototypeEveryCodeInlineAttribute = JSC::InlineAttribute::None; const int s_builtinPrototypeEveryCodeLength = 762; static const JSC::Intrinsic s_builtinPrototypeEveryCodeIntrinsic = JSC::NoIntrinsic; const char* const s_builtinPrototypeEveryCode = @@ -157,6 +163,7 @@ s_JSCCombinedCode + 0 const JSC::ConstructAbility s_builtinPrototypeForEachCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_builtinPrototypeForEachCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_builtinPrototypeForEachCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_builtinPrototypeForEachCodeInlineAttribute = JSC::InlineAttribute::None; const int s_builtinPrototypeForEachCodeLength = 694; static const JSC::Intrinsic s_builtinPrototypeForEachCodeIntrinsic = JSC::NoIntrinsic; const char* const s_builtinPrototypeForEachCode = @@ -166,6 +173,7 @@ s_JSCCombinedCode + 762 const JSC::ConstructAbility s_builtinPrototypeMatchCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_builtinPrototypeMatchCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_builtinPrototypeMatchCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_builtinPrototypeMatchCodeInlineAttribute = JSC::InlineAttribute::None; const int s_builtinPrototypeMatchCodeLength = 1238; static const JSC::Intrinsic s_builtinPrototypeMatchCodeIntrinsic = JSC::NoIntrinsic; const char* const s_builtinPrototypeMatchCode = @@ -175,6 +183,7 @@ s_JSCCombinedCode + 1456 const JSC::ConstructAbility s_builtinPrototypeTestCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_builtinPrototypeTestCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_builtinPrototypeTestCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_builtinPrototypeTestCodeInlineAttribute = JSC::InlineAttribute::None; const int s_builtinPrototypeTestCodeLength = 504; static const JSC::Intrinsic s_builtinPrototypeTestCodeIntrinsic = JSC::RegExpTestIntrinsic; const char* const s_builtinPrototypeTestCode = diff --git a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Separate.js-result b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Separate.js-result index 844dc84c..d07e4dc2 100644 --- a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Separate.js-result 
+++ b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Separate.js-result @@ -45,21 +45,25 @@ extern const int s_builtinPrototypeEveryCodeLength; extern const JSC::ConstructAbility s_builtinPrototypeEveryCodeConstructAbility; extern const JSC::ConstructorKind s_builtinPrototypeEveryCodeConstructorKind; extern const JSC::ImplementationVisibility s_builtinPrototypeEveryCodeImplementationVisibility; +extern const JSC::InlineAttribute s_builtinPrototypeEveryCodeInlineAttribute; extern const char* const s_builtinPrototypeForEachCode; extern const int s_builtinPrototypeForEachCodeLength; extern const JSC::ConstructAbility s_builtinPrototypeForEachCodeConstructAbility; extern const JSC::ConstructorKind s_builtinPrototypeForEachCodeConstructorKind; extern const JSC::ImplementationVisibility s_builtinPrototypeForEachCodeImplementationVisibility; +extern const JSC::InlineAttribute s_builtinPrototypeForEachCodeInlineAttribute; extern const char* const s_builtinPrototypeMatchCode; extern const int s_builtinPrototypeMatchCodeLength; extern const JSC::ConstructAbility s_builtinPrototypeMatchCodeConstructAbility; extern const JSC::ConstructorKind s_builtinPrototypeMatchCodeConstructorKind; extern const JSC::ImplementationVisibility s_builtinPrototypeMatchCodeImplementationVisibility; +extern const JSC::InlineAttribute s_builtinPrototypeMatchCodeInlineAttribute; extern const char* const s_builtinPrototypeTestCode; extern const int s_builtinPrototypeTestCodeLength; extern const JSC::ConstructAbility s_builtinPrototypeTestCodeConstructAbility; extern const JSC::ConstructorKind s_builtinPrototypeTestCodeConstructorKind; extern const JSC::ImplementationVisibility s_builtinPrototypeTestCodeImplementationVisibility; +extern const JSC::InlineAttribute s_builtinPrototypeTestCodeInlineAttribute; #define JSC_FOREACH_BUILTIN_PROTOTYPE_BUILTIN_DATA(macro) \ macro(every, builtinPrototypeEvery, 1) \ @@ -139,6 +143,7 @@ namespace JSC { const JSC::ConstructAbility s_builtinPrototypeEveryCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_builtinPrototypeEveryCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_builtinPrototypeEveryCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_builtinPrototypeEveryCodeInlineAttribute = JSC::InlineAttribute::None; const int s_builtinPrototypeEveryCodeLength = 762; static const JSC::Intrinsic s_builtinPrototypeEveryCodeIntrinsic = JSC::NoIntrinsic; const char* const s_builtinPrototypeEveryCode = @@ -174,6 +179,7 @@ const char* const s_builtinPrototypeEveryCode = const JSC::ConstructAbility s_builtinPrototypeForEachCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_builtinPrototypeForEachCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_builtinPrototypeForEachCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_builtinPrototypeForEachCodeInlineAttribute = JSC::InlineAttribute::None; const int s_builtinPrototypeForEachCodeLength = 694; static const JSC::Intrinsic s_builtinPrototypeForEachCodeIntrinsic = JSC::NoIntrinsic; const char* const s_builtinPrototypeForEachCode = @@ -205,6 +211,7 @@ const char* const s_builtinPrototypeForEachCode = const JSC::ConstructAbility s_builtinPrototypeMatchCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind 
s_builtinPrototypeMatchCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_builtinPrototypeMatchCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_builtinPrototypeMatchCodeInlineAttribute = JSC::InlineAttribute::None; const int s_builtinPrototypeMatchCodeLength = 1238; static const JSC::Intrinsic s_builtinPrototypeMatchCodeIntrinsic = JSC::NoIntrinsic; const char* const s_builtinPrototypeMatchCode = @@ -263,6 +270,7 @@ const char* const s_builtinPrototypeMatchCode = const JSC::ConstructAbility s_builtinPrototypeTestCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_builtinPrototypeTestCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_builtinPrototypeTestCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_builtinPrototypeTestCodeInlineAttribute = JSC::InlineAttribute::None; const int s_builtinPrototypeTestCodeLength = 504; static const JSC::Intrinsic s_builtinPrototypeTestCodeIntrinsic = JSC::RegExpTestIntrinsic; const char* const s_builtinPrototypeTestCode = diff --git a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Combined.js-result b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Combined.js-result index 31041284..360f53c1 100644 --- a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Combined.js-result +++ b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Combined.js-result @@ -37,6 +37,7 @@ class VM; enum class ConstructAbility : uint8_t; enum class ConstructorKind : uint8_t; enum class ImplementationVisibility : uint8_t; +enum class InlineAttribute : uint8_t; } namespace JSC { @@ -51,11 +52,13 @@ extern const int s_builtinConstructorOfCodeLength; extern const JSC::ConstructAbility s_builtinConstructorOfCodeConstructAbility; extern const JSC::ConstructorKind s_builtinConstructorOfCodeConstructorKind; extern const JSC::ImplementationVisibility s_builtinConstructorOfCodeImplementationVisibility; +extern const JSC::InlineAttribute s_builtinConstructorOfCodeInlineAttribute; extern const char* const s_builtinConstructorFromCode; extern const int s_builtinConstructorFromCodeLength; extern const JSC::ConstructAbility s_builtinConstructorFromCodeConstructAbility; extern const JSC::ConstructorKind s_builtinConstructorFromCodeConstructorKind; extern const JSC::ImplementationVisibility s_builtinConstructorFromCodeImplementationVisibility; +extern const JSC::InlineAttribute s_builtinConstructorFromCodeInlineAttribute; #define JSC_FOREACH_BUILTINCONSTRUCTOR_BUILTIN_DATA(macro) \ macro(of, builtinConstructorOf, 0) \ @@ -130,6 +133,7 @@ const unsigned s_JSCCombinedCodeLength = 2340; const JSC::ConstructAbility s_builtinConstructorFromCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_builtinConstructorFromCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_builtinConstructorFromCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_builtinConstructorFromCodeInlineAttribute = JSC::InlineAttribute::None; const int s_builtinConstructorFromCodeLength = 2046; static const JSC::Intrinsic s_builtinConstructorFromCodeIntrinsic = JSC::NoIntrinsic; const char* const 
s_builtinConstructorFromCode = @@ -139,6 +143,7 @@ s_JSCCombinedCode + 0 const JSC::ConstructAbility s_builtinConstructorOfCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_builtinConstructorOfCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_builtinConstructorOfCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_builtinConstructorOfCodeInlineAttribute = JSC::InlineAttribute::None; const int s_builtinConstructorOfCodeLength = 294; static const JSC::Intrinsic s_builtinConstructorOfCodeIntrinsic = JSC::NoIntrinsic; const char* const s_builtinConstructorOfCode = diff --git a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Separate.js-result b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Separate.js-result index f5cb3972..390604c7 100644 --- a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Separate.js-result +++ b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Separate.js-result @@ -44,11 +44,13 @@ extern const int s_builtinConstructorOfCodeLength; extern const JSC::ConstructAbility s_builtinConstructorOfCodeConstructAbility; extern const JSC::ConstructorKind s_builtinConstructorOfCodeConstructorKind; extern const JSC::ImplementationVisibility s_builtinConstructorOfCodeImplementationVisibility; +extern const JSC::InlineAttribute s_builtinConstructorOfCodeInlineAttribute; extern const char* const s_builtinConstructorFromCode; extern const int s_builtinConstructorFromCodeLength; extern const JSC::ConstructAbility s_builtinConstructorFromCodeConstructAbility; extern const JSC::ConstructorKind s_builtinConstructorFromCodeConstructorKind; extern const JSC::ImplementationVisibility s_builtinConstructorFromCodeImplementationVisibility; +extern const JSC::InlineAttribute s_builtinConstructorFromCodeInlineAttribute; #define JSC_FOREACH_BUILTINCONSTRUCTOR_BUILTIN_DATA(macro) \ macro(of, builtinConstructorOf, 0) \ @@ -119,6 +121,7 @@ namespace JSC { const JSC::ConstructAbility s_builtinConstructorOfCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_builtinConstructorOfCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_builtinConstructorOfCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_builtinConstructorOfCodeInlineAttribute = JSC::InlineAttribute::None; const int s_builtinConstructorOfCodeLength = 294; static const JSC::Intrinsic s_builtinConstructorOfCodeIntrinsic = JSC::NoIntrinsic; const char* const s_builtinConstructorOfCode = @@ -139,6 +142,7 @@ const char* const s_builtinConstructorOfCode = const JSC::ConstructAbility s_builtinConstructorFromCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_builtinConstructorFromCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_builtinConstructorFromCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_builtinConstructorFromCodeInlineAttribute = JSC::InlineAttribute::None; const int s_builtinConstructorFromCodeLength = 2046; static const JSC::Intrinsic s_builtinConstructorFromCodeIntrinsic = JSC::NoIntrinsic; const char* const s_builtinConstructorFromCode = diff --git 
a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-InternalClashingNames-Combined.js-result b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-InternalClashingNames-Combined.js-result index 4afb3ed1..2e97fd93 100644 --- a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-InternalClashingNames-Combined.js-result +++ b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-InternalClashingNames-Combined.js-result @@ -38,6 +38,7 @@ class VM; enum class ConstructAbility : uint8_t; enum class ConstructorKind : uint8_t; enum class ImplementationVisibility : uint8_t; +enum class InlineAttribute : uint8_t; } namespace JSC { @@ -52,11 +53,13 @@ extern const int s_internalClashingNamesIsReadableStreamLockedCodeLength; extern const JSC::ConstructAbility s_internalClashingNamesIsReadableStreamLockedCodeConstructAbility; extern const JSC::ConstructorKind s_internalClashingNamesIsReadableStreamLockedCodeConstructorKind; extern const JSC::ImplementationVisibility s_internalClashingNamesIsReadableStreamLockedCodeImplementationVisibility; +extern const JSC::InlineAttribute s_internalClashingNamesIsReadableStreamLockedCodeInlineAttribute; extern const char* const s_internalClashingNamesIsReadableStreamLockedCode; extern const int s_internalClashingNamesIsReadableStreamLockedCodeLength; extern const JSC::ConstructAbility s_internalClashingNamesIsReadableStreamLockedCodeConstructAbility; extern const JSC::ConstructorKind s_internalClashingNamesIsReadableStreamLockedCodeConstructorKind; extern const JSC::ImplementationVisibility s_internalClashingNamesIsReadableStreamLockedCodeImplementationVisibility; +extern const JSC::InlineAttribute s_internalClashingNamesIsReadableStreamLockedCodeInlineAttribute; #define JSC_FOREACH_INTERNALCLASHINGNAMES_BUILTIN_DATA(macro) \ macro(isReadableStreamLocked, internalClashingNamesIsReadableStreamLocked, 1) \ @@ -131,6 +134,7 @@ const unsigned s_JSCCombinedCodeLength = 142; const JSC::ConstructAbility s_internalClashingNamesIsReadableStreamLockedCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_internalClashingNamesIsReadableStreamLockedCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_internalClashingNamesIsReadableStreamLockedCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_internalClashingNamesIsReadableStreamLockedCodeInlineAttribute = JSC::InlineAttribute::None; const int s_internalClashingNamesIsReadableStreamLockedCodeLength = 71; static const JSC::Intrinsic s_internalClashingNamesIsReadableStreamLockedCodeIntrinsic = JSC::NoIntrinsic; const char* const s_internalClashingNamesIsReadableStreamLockedCode = @@ -140,6 +144,7 @@ s_JSCCombinedCode + 0 const JSC::ConstructAbility s_internalClashingNamesIsReadableStreamLockedCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_internalClashingNamesIsReadableStreamLockedCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_internalClashingNamesIsReadableStreamLockedCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_internalClashingNamesIsReadableStreamLockedCodeInlineAttribute = JSC::InlineAttribute::None; const int s_internalClashingNamesIsReadableStreamLockedCodeLength = 71; static const JSC::Intrinsic 
s_internalClashingNamesIsReadableStreamLockedCodeIntrinsic = JSC::NoIntrinsic; const char* const s_internalClashingNamesIsReadableStreamLockedCode = diff --git a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-AnotherGuardedInternalBuiltin-Separate.js-result b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-AnotherGuardedInternalBuiltin-Separate.js-result index 5e23f9b4..121f166a 100644 --- a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-AnotherGuardedInternalBuiltin-Separate.js-result +++ b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-AnotherGuardedInternalBuiltin-Separate.js-result @@ -49,6 +49,7 @@ extern const int s_anotherGuardedInternalBuiltinLetsFetchCodeLength; extern const JSC::ConstructAbility s_anotherGuardedInternalBuiltinLetsFetchCodeConstructAbility; extern const JSC::ConstructorKind s_anotherGuardedInternalBuiltinLetsFetchCodeConstructorKind; extern const JSC::ImplementationVisibility s_anotherGuardedInternalBuiltinLetsFetchCodeImplementationVisibility; +extern const JSC::InlineAttribute s_anotherGuardedInternalBuiltinLetsFetchCodeInlineAttribute; #define WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_DATA(macro) \ macro(letsFetch, anotherGuardedInternalBuiltinLetsFetch, 0) \ @@ -108,7 +109,7 @@ inline JSC::UnlinkedFunctionExecutable* AnotherGuardedInternalBuiltinBuiltinsWra JSC::Identifier executableName = functionName##PublicName();\ if (overriddenName)\ executableName = JSC::Identifier::fromString(m_vm, overriddenName);\ - m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, executableName, s_##name##ImplementationVisibility, s_##name##ConstructorKind, s_##name##ConstructAbility), this, &m_##name##Executable);\ + m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, executableName, s_##name##ImplementationVisibility, s_##name##ConstructorKind, s_##name##ConstructAbility, s_##name##InlineAttribute), this, &m_##name##Executable);\ }\ return m_##name##Executable.get();\ } @@ -211,6 +212,7 @@ namespace WebCore { const JSC::ConstructAbility s_anotherGuardedInternalBuiltinLetsFetchCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_anotherGuardedInternalBuiltinLetsFetchCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_anotherGuardedInternalBuiltinLetsFetchCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_anotherGuardedInternalBuiltinLetsFetchCodeInlineAttribute = JSC::InlineAttribute::None; const int s_anotherGuardedInternalBuiltinLetsFetchCodeLength = 83; static const JSC::Intrinsic s_anotherGuardedInternalBuiltinLetsFetchCodeIntrinsic = JSC::NoIntrinsic; const char* const s_anotherGuardedInternalBuiltinLetsFetchCode = diff --git a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-ArbitraryConditionalGuard-Separate.js-result b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-ArbitraryConditionalGuard-Separate.js-result index 49f67260..91a9916e 100644 --- a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-ArbitraryConditionalGuard-Separate.js-result +++ b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-ArbitraryConditionalGuard-Separate.js-result @@ -50,6 +50,7 @@ extern const int s_arbitraryConditionalGuardIsReadableStreamLockedCodeLength; extern const
JSC::ConstructAbility s_arbitraryConditionalGuardIsReadableStreamLockedCodeConstructAbility; extern const JSC::ConstructorKind s_arbitraryConditionalGuardIsReadableStreamLockedCodeConstructorKind; extern const JSC::ImplementationVisibility s_arbitraryConditionalGuardIsReadableStreamLockedCodeImplementationVisibility; +extern const JSC::InlineAttribute s_arbitraryConditionalGuardIsReadableStreamLockedCodeInlineAttribute; #define WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_DATA(macro) \ macro(isReadableStreamLocked, arbitraryConditionalGuardIsReadableStreamLocked, 1) \ @@ -109,7 +110,7 @@ inline JSC::UnlinkedFunctionExecutable* ArbitraryConditionalGuardBuiltinsWrapper JSC::Identifier executableName = functionName##PublicName();\ if (overriddenName)\ executableName = JSC::Identifier::fromString(m_vm, overriddenName);\ - m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, executableName, s_##name##ImplementationVisibility, s_##name##ConstructorKind, s_##name##ConstructAbility), this, &m_##name##Executable);\ + m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, executableName, s_##name##ImplementationVisibility, s_##name##ConstructorKind, s_##name##ConstructAbility, s_##name##InlineAttribute), this, &m_##name##Executable);\ }\ return m_##name##Executable.get();\ } @@ -176,6 +177,7 @@ namespace WebCore { const JSC::ConstructAbility s_arbitraryConditionalGuardIsReadableStreamLockedCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_arbitraryConditionalGuardIsReadableStreamLockedCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_arbitraryConditionalGuardIsReadableStreamLockedCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_arbitraryConditionalGuardIsReadableStreamLockedCodeInlineAttribute = JSC::InlineAttribute::None; const int s_arbitraryConditionalGuardIsReadableStreamLockedCodeLength = 71; static const JSC::Intrinsic s_arbitraryConditionalGuardIsReadableStreamLockedCodeIntrinsic = JSC::NoIntrinsic; const char* const s_arbitraryConditionalGuardIsReadableStreamLockedCode = diff --git a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-GuardedBuiltin-Separate.js-result b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-GuardedBuiltin-Separate.js-result index 372939bb..f5e6fe0b 100644 --- a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-GuardedBuiltin-Separate.js-result +++ b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-GuardedBuiltin-Separate.js-result @@ -50,6 +50,7 @@ extern const int s_guardedBuiltinIsReadableStreamLockedCodeLength; extern const JSC::ConstructAbility s_guardedBuiltinIsReadableStreamLockedCodeConstructAbility; extern const JSC::ConstructorKind s_guardedBuiltinIsReadableStreamLockedCodeConstructorKind; extern const JSC::ImplementationVisibility s_guardedBuiltinIsReadableStreamLockedCodeImplementationVisibility; +extern const JSC::InlineAttribute s_guardedBuiltinIsReadableStreamLockedCodeInlineAttribute; #define WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_DATA(macro) \ macro(isReadableStreamLocked, guardedBuiltinIsReadableStreamLocked, 1) \ @@ -109,7 +110,7 @@ inline JSC::UnlinkedFunctionExecutable* GuardedBuiltinBuiltinsWrapper::name##Exe JSC::Identifier executableName = functionName##PublicName();\ if (overriddenName)\ executableName = JSC::Identifier::fromString(m_vm,
overriddenName);\ - m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, executableName, s_##name##ImplementationVisibility, s_##name##ConstructorKind, s_##name##ConstructAbility), this, &m_##name##Executable);\ + m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, executableName, s_##name##ImplementationVisibility, s_##name##ConstructorKind, s_##name##ConstructAbility, s_##name##InlineAttribute), this, &m_##name##Executable);\ }\ return m_##name##Executable.get();\ } @@ -176,6 +177,7 @@ namespace WebCore { const JSC::ConstructAbility s_guardedBuiltinIsReadableStreamLockedCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_guardedBuiltinIsReadableStreamLockedCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_guardedBuiltinIsReadableStreamLockedCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_guardedBuiltinIsReadableStreamLockedCodeInlineAttribute = JSC::InlineAttribute::None; const int s_guardedBuiltinIsReadableStreamLockedCodeLength = 71; static const JSC::Intrinsic s_guardedBuiltinIsReadableStreamLockedCodeIntrinsic = JSC::NoIntrinsic; const char* const s_guardedBuiltinIsReadableStreamLockedCode = diff --git a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-GuardedInternalBuiltin-Separate.js-result b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-GuardedInternalBuiltin-Separate.js-result index b3d18d1a..bd13569d 100644 --- a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-GuardedInternalBuiltin-Separate.js-result +++ b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-GuardedInternalBuiltin-Separate.js-result @@ -50,6 +50,7 @@ extern const int s_guardedInternalBuiltinIsReadableStreamLockedCodeLength; extern const JSC::ConstructAbility s_guardedInternalBuiltinIsReadableStreamLockedCodeConstructAbility; extern const JSC::ConstructorKind s_guardedInternalBuiltinIsReadableStreamLockedCodeConstructorKind; extern const JSC::ImplementationVisibility s_guardedInternalBuiltinIsReadableStreamLockedCodeImplementationVisibility; +extern const JSC::InlineAttribute s_guardedInternalBuiltinIsReadableStreamLockedCodeInlineAttribute; #define WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_DATA(macro) \ macro(isReadableStreamLocked, guardedInternalBuiltinIsReadableStreamLocked, 1) \ @@ -109,7 +110,7 @@ inline JSC::UnlinkedFunctionExecutable* GuardedInternalBuiltinBuiltinsWrapper::n JSC::Identifier executableName = functionName##PublicName();\ if (overriddenName)\ executableName = JSC::Identifier::fromString(m_vm, overriddenName);\ - m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, executableName, s_##name##ImplementationVisibility, s_##name##ConstructorKind, s_##name##ConstructAbility), this, &m_##name##Executable);\ + m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, executableName, s_##name##ImplementationVisibility, s_##name##ConstructorKind, s_##name##ConstructAbility, s_##name##InlineAttribute), this, &m_##name##Executable);\ }\ return m_##name##Executable.get();\ } @@ -213,6 +214,7 @@ namespace WebCore { const JSC::ConstructAbility s_guardedInternalBuiltinIsReadableStreamLockedCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind
s_guardedInternalBuiltinIsReadableStreamLockedCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_guardedInternalBuiltinIsReadableStreamLockedCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_guardedInternalBuiltinIsReadableStreamLockedCodeInlineAttribute = JSC::InlineAttribute::None; const int s_guardedInternalBuiltinIsReadableStreamLockedCodeLength = 71; static const JSC::Intrinsic s_guardedInternalBuiltinIsReadableStreamLockedCodeIntrinsic = JSC::NoIntrinsic; const char* const s_guardedInternalBuiltinIsReadableStreamLockedCode = diff --git a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-UnguardedBuiltin-Separate.js-result b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-UnguardedBuiltin-Separate.js-result index f376bf4a..2600722c 100644 --- a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-UnguardedBuiltin-Separate.js-result +++ b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-UnguardedBuiltin-Separate.js-result @@ -48,6 +48,7 @@ extern const int s_unguardedBuiltinIsReadableStreamLockedCodeLength; extern const JSC::ConstructAbility s_unguardedBuiltinIsReadableStreamLockedCodeConstructAbility; extern const JSC::ConstructorKind s_unguardedBuiltinIsReadableStreamLockedCodeConstructorKind; extern const JSC::ImplementationVisibility s_unguardedBuiltinIsReadableStreamLockedCodeImplementationVisibility; +extern const JSC::InlineAttribute s_unguardedBuiltinIsReadableStreamLockedCodeInlineAttribute; #define WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_DATA(macro) \ macro(isReadableStreamLocked, unguardedBuiltinIsReadableStreamLocked, 1) \ @@ -107,7 +108,7 @@ inline JSC::UnlinkedFunctionExecutable* UnguardedBuiltinBuiltinsWrapper::name##E JSC::Identifier executableName = functionName##PublicName();\ if (overriddenName)\ executableName = JSC::Identifier::fromString(m_vm, overriddenName);\ - m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, executableName, s_##name##ImplementationVisibility, s_##name##ConstructorKind, s_##name##ConstructAbility), this, &m_##name##Executable);\ + m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, executableName, s_##name##ImplementationVisibility, s_##name##ConstructorKind, s_##name##ConstructAbility, s_##name##InlineAttribute), this, &m_##name##Executable);\ }\ return m_##name##Executable.get();\ } @@ -170,6 +171,7 @@ namespace WebCore { const JSC::ConstructAbility s_unguardedBuiltinIsReadableStreamLockedCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_unguardedBuiltinIsReadableStreamLockedCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_unguardedBuiltinIsReadableStreamLockedCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_unguardedBuiltinIsReadableStreamLockedCodeInlineAttribute = JSC::InlineAttribute::None; const int s_unguardedBuiltinIsReadableStreamLockedCodeLength = 71; static const JSC::Intrinsic s_unguardedBuiltinIsReadableStreamLockedCodeIntrinsic = JSC::NoIntrinsic; const char* const s_unguardedBuiltinIsReadableStreamLockedCode = diff --git a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-xmlCasingTest-Separate.js-result b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-xmlCasingTest-Separate.js-result
index e2b89603..4d7e3340 100644 --- a/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-xmlCasingTest-Separate.js-result +++ b/vendor/webkit/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-xmlCasingTest-Separate.js-result @@ -50,16 +50,19 @@ extern const int s_xmlCasingTestXMLCasingTestCodeLength; extern const JSC::ConstructAbility s_xmlCasingTestXMLCasingTestCodeConstructAbility; extern const JSC::ConstructorKind s_xmlCasingTestXMLCasingTestCodeConstructorKind; extern const JSC::ImplementationVisibility s_xmlCasingTestXMLCasingTestCodeImplementationVisibility; +extern const JSC::InlineAttribute s_xmlCasingTestXMLCasingTestCodeInlineAttribute; extern const char* const s_xmlCasingTestCssCasingTestCode; extern const int s_xmlCasingTestCssCasingTestCodeLength; extern const JSC::ConstructAbility s_xmlCasingTestCssCasingTestCodeConstructAbility; extern const JSC::ConstructorKind s_xmlCasingTestCssCasingTestCodeConstructorKind; extern const JSC::ImplementationVisibility s_xmlCasingTestCssCasingTestCodeImplementationVisibility; +extern const JSC::InlineAttribute s_xmlCasingTestCssCasingTestCodeInlineAttribute; extern const char* const s_xmlCasingTestUrlCasingTestCode; extern const int s_xmlCasingTestUrlCasingTestCodeLength; extern const JSC::ConstructAbility s_xmlCasingTestUrlCasingTestCodeConstructAbility; extern const JSC::ConstructorKind s_xmlCasingTestUrlCasingTestCodeConstructorKind; extern const JSC::ImplementationVisibility s_xmlCasingTestUrlCasingTestCodeImplementationVisibility; +extern const JSC::InlineAttribute s_xmlCasingTestUrlCasingTestCodeInlineAttribute; #define WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_DATA(macro) \ macro(xmlCasingTest, xmlCasingTestXMLCasingTest, 1) \ @@ -127,7 +130,7 @@ inline JSC::UnlinkedFunctionExecutable* xmlCasingTestBuiltinsWrapper::name##Exec JSC::Identifier executableName = functionName##PublicName();\ if (overriddenName)\ executableName = JSC::Identifier::fromString(m_vm, overriddenName);\ - m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, executableName, s_##name##ImplementationVisibility, s_##name##ConstructorKind, s_##name##ConstructAbility), this, &m_##name##Executable);\ + m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, executableName, s_##name##ImplementationVisibility, s_##name##ConstructorKind, s_##name##ConstructAbility, s_##name##InlineAttribute), this, &m_##name##Executable);\ }\ return m_##name##Executable.get();\ } @@ -231,6 +234,7 @@ namespace WebCore { const JSC::ConstructAbility s_xmlCasingTestXMLCasingTestCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_xmlCasingTestXMLCasingTestCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_xmlCasingTestXMLCasingTestCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_xmlCasingTestXMLCasingTestCodeInlineAttribute = JSC::InlineAttribute::None; const int s_xmlCasingTestXMLCasingTestCodeLength = 71; static const JSC::Intrinsic s_xmlCasingTestXMLCasingTestCodeIntrinsic = JSC::NoIntrinsic; const char* const s_xmlCasingTestXMLCasingTestCode = @@ -245,6 +249,7 @@ const char* const s_xmlCasingTestXMLCasingTestCode = const JSC::ConstructAbility s_xmlCasingTestCssCasingTestCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_xmlCasingTestCssCasingTestCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility
s_xmlCasingTestCssCasingTestCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_xmlCasingTestCssCasingTestCodeInlineAttribute = JSC::InlineAttribute::None; const int s_xmlCasingTestCssCasingTestCodeLength = 402; static const JSC::Intrinsic s_xmlCasingTestCssCasingTestCodeIntrinsic = JSC::NoIntrinsic; const char* const s_xmlCasingTestCssCasingTestCode = @@ -265,6 +270,7 @@ const char* const s_xmlCasingTestCssCasingTestCode = const JSC::ConstructAbility s_xmlCasingTestUrlCasingTestCodeConstructAbility = JSC::ConstructAbility::CannotConstruct; const JSC::ConstructorKind s_xmlCasingTestUrlCasingTestCodeConstructorKind = JSC::ConstructorKind::None; const JSC::ImplementationVisibility s_xmlCasingTestUrlCasingTestCodeImplementationVisibility = JSC::ImplementationVisibility::Public; +const JSC::InlineAttribute s_xmlCasingTestUrlCasingTestCodeInlineAttribute = JSC::InlineAttribute::None; const int s_xmlCasingTestUrlCasingTestCodeLength = 338; static const JSC::Intrinsic s_xmlCasingTestUrlCasingTestCodeIntrinsic = JSC::NoIntrinsic; const char* const s_xmlCasingTestUrlCasingTestCode = diff --git a/vendor/webkit/Source/JavaScriptCore/Scripts/wkbuiltins/builtins_generate_combined_header.py b/vendor/webkit/Source/JavaScriptCore/Scripts/wkbuiltins/builtins_generate_combined_header.py index d7a9421e..13bb9034 100644 --- a/vendor/webkit/Source/JavaScriptCore/Scripts/wkbuiltins/builtins_generate_combined_header.py +++ b/vendor/webkit/Source/JavaScriptCore/Scripts/wkbuiltins/builtins_generate_combined_header.py @@ -81,6 +81,7 @@ class VM; enum class ConstructAbility : uint8_t; enum class ConstructorKind : uint8_t; enum class ImplementationVisibility : uint8_t; +enum class InlineAttribute : uint8_t; }""" def generate_section_for_object(self, object): @@ -103,7 +104,8 @@ def generate_externs_for_object(self, object): extern const int s_%(codeName)sLength; extern const JSC::ConstructAbility s_%(codeName)sConstructAbility; extern const JSC::ConstructorKind s_%(codeName)sConstructorKind; -extern const JSC::ImplementationVisibility s_%(codeName)sImplementationVisibility;""" % function_args) +extern const JSC::ImplementationVisibility s_%(codeName)sImplementationVisibility; +extern const JSC::InlineAttribute s_%(codeName)sInlineAttribute;""" % function_args) return lines diff --git a/vendor/webkit/Source/JavaScriptCore/Scripts/wkbuiltins/builtins_generate_separate_header.py b/vendor/webkit/Source/JavaScriptCore/Scripts/wkbuiltins/builtins_generate_separate_header.py index d5035675..66be1e09 100644 --- a/vendor/webkit/Source/JavaScriptCore/Scripts/wkbuiltins/builtins_generate_separate_header.py +++ b/vendor/webkit/Source/JavaScriptCore/Scripts/wkbuiltins/builtins_generate_separate_header.py @@ -129,7 +129,8 @@ def generate_externs_for_object(self, object): extern const int s_%(codeName)sLength; extern const JSC::ConstructAbility s_%(codeName)sConstructAbility; extern const JSC::ConstructorKind s_%(codeName)sConstructorKind; -extern const JSC::ImplementationVisibility s_%(codeName)sImplementationVisibility;""" % function_args) +extern const JSC::ImplementationVisibility s_%(codeName)sImplementationVisibility; +extern const JSC::InlineAttribute s_%(codeName)sInlineAttribute;""" % function_args) return lines diff --git a/vendor/webkit/Source/JavaScriptCore/Scripts/wkbuiltins/builtins_generator.py b/vendor/webkit/Source/JavaScriptCore/Scripts/wkbuiltins/builtins_generator.py index b0d44071..89bd7e55 100644 --- 
a/vendor/webkit/Source/JavaScriptCore/Scripts/wkbuiltins/builtins_generator.py +++ b/vendor/webkit/Source/JavaScriptCore/Scripts/wkbuiltins/builtins_generator.py @@ -133,6 +133,10 @@ def generate_embedded_code_data_for_function(self, function): if function.is_naked_constructor: constructorKind = "Naked" + inlineAttribute = "None" + if function.is_always_inline: + inlineAttribute = "Always" + return { 'codeName': BuiltinsGenerator.mangledNameForFunction(function) + 'Code', 'embeddedSource': embeddedSource, @@ -140,18 +144,20 @@ def generate_embedded_code_data_for_function(self, function): 'originalSource': text + "\n", 'constructAbility': constructAbility, 'constructorKind': constructorKind, + 'inlineAttribute': inlineAttribute, 'visibility': function.visibility, 'intrinsic': function.intrinsic } def generate_embedded_code_string_section_for_data(self, data): lines = [] - lines.append("const JSC::ConstructAbility s_%(codeName)sConstructAbility = JSC::ConstructAbility::%(constructAbility)s;" % data); - lines.append("const JSC::ConstructorKind s_%(codeName)sConstructorKind = JSC::ConstructorKind::%(constructorKind)s;" % data); - lines.append("const JSC::ImplementationVisibility s_%(codeName)sImplementationVisibility = JSC::ImplementationVisibility::%(visibility)s;" % data); - lines.append("const int s_%(codeName)sLength = %(embeddedSourceLength)d;" % data); - lines.append("static const JSC::Intrinsic s_%(codeName)sIntrinsic = JSC::%(intrinsic)s;" % data); - lines.append("const char* const s_%(codeName)s =\n%(embeddedSource)s\n;" % data); + lines.append("const JSC::ConstructAbility s_%(codeName)sConstructAbility = JSC::ConstructAbility::%(constructAbility)s;" % data) + lines.append("const JSC::ConstructorKind s_%(codeName)sConstructorKind = JSC::ConstructorKind::%(constructorKind)s;" % data) + lines.append("const JSC::ImplementationVisibility s_%(codeName)sImplementationVisibility = JSC::ImplementationVisibility::%(visibility)s;" % data) + lines.append("const JSC::InlineAttribute s_%(codeName)sInlineAttribute = JSC::InlineAttribute::%(inlineAttribute)s;" % data) + lines.append("const int s_%(codeName)sLength = %(embeddedSourceLength)d;" % data) + lines.append("static const JSC::Intrinsic s_%(codeName)sIntrinsic = JSC::%(intrinsic)s;" % data) + lines.append("const char* const s_%(codeName)s =\n%(embeddedSource)s\n;" % data) return '\n'.join(lines) # Helper methods. 
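Note: taken together, the generator changes above and the model/template changes below thread a new JSC::InlineAttribute through the builtins pipeline. A builtin whose source carries an @alwaysInline annotation gets is_always_inline set on the model, the generators then emit an InlineAttribute::Always constant (InlineAttribute::None otherwise) alongside the existing construct-ability, constructor-kind, and visibility constants, and the wrapper macro forwards that constant as the new trailing argument to JSC::createBuiltinExecutable. As a sketch, a hypothetical builtin myHelper marked @alwaysInline would come out of the generator shaped like this (the name, length, and source string are invented for illustration; the layout mirrors the xmlCasingTest expected results above):

    // Hypothetical generator output for:
    //   @alwaysInline
    //   function myHelper() { return 42; }
    const JSC::ConstructAbility s_builtinsMyHelperCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
    const JSC::ConstructorKind s_builtinsMyHelperCodeConstructorKind = JSC::ConstructorKind::None;
    const JSC::ImplementationVisibility s_builtinsMyHelperCodeImplementationVisibility = JSC::ImplementationVisibility::Public;
    const JSC::InlineAttribute s_builtinsMyHelperCodeInlineAttribute = JSC::InlineAttribute::Always; // set because of @alwaysInline
    const int s_builtinsMyHelperCodeLength = 28; // length of the literal below
    static const JSC::Intrinsic s_builtinsMyHelperCodeIntrinsic = JSC::NoIntrinsic;
    const char* const s_builtinsMyHelperCode = "(function () { return 42; })";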
diff --git a/vendor/webkit/Source/JavaScriptCore/Scripts/wkbuiltins/builtins_model.py b/vendor/webkit/Source/JavaScriptCore/Scripts/wkbuiltins/builtins_model.py index 7e145798..71eacc0b 100644 --- a/vendor/webkit/Source/JavaScriptCore/Scripts/wkbuiltins/builtins_model.py +++ b/vendor/webkit/Source/JavaScriptCore/Scripts/wkbuiltins/builtins_model.py @@ -44,6 +44,7 @@ functionHeadRegExp = re.compile(r"(?:@[\w|=\[\] \"\.]+\s*\n)*(?:async\s+)?function\s+\w+\s*\(.*?\)", re.MULTILINE | re.DOTALL) functionLinkTimeConstantRegExp = re.compile(r".*^@linkTimeConstant", re.MULTILINE | re.DOTALL) +functionAlwaysInlineRegExp = re.compile(r".*^@alwaysInline", re.MULTILINE | re.DOTALL) functionVisibilityRegExp = re.compile(r".*^@visibility=(\w+)", re.MULTILINE | re.DOTALL) functionNakedConstructorRegExp = re.compile(r".*^@nakedConstructor", re.MULTILINE | re.DOTALL) functionIntrinsicRegExp = re.compile(r".*^@intrinsic=(\w+)", re.MULTILINE | re.DOTALL) @@ -104,13 +105,14 @@ def __init__(self, object_name, annotations, functions): class BuiltinFunction: - def __init__(self, function_name, function_source, parameters, is_async, is_constructor, is_link_time_constant, is_naked_constructor, intrinsic, visibility, overridden_name): + def __init__(self, function_name, function_source, parameters, is_async, is_constructor, is_link_time_constant, is_naked_constructor, is_always_inline, intrinsic, visibility, overridden_name): self.function_name = function_name self.function_source = function_source self.parameters = parameters self.is_async = is_async self.is_constructor = is_constructor self.is_naked_constructor = is_naked_constructor + self.is_always_inline = is_always_inline self.is_link_time_constant = is_link_time_constant self.intrinsic = intrinsic self.visibility = visibility @@ -146,6 +148,7 @@ def fromString(function_string): is_getter = functionIsGetterRegExp.match(function_source) != None is_link_time_constant = functionLinkTimeConstantRegExp.match(function_source) != None is_naked_constructor = functionNakedConstructorRegExp.match(function_source) != None + is_always_inline = functionAlwaysInlineRegExp.match(function_source) != None if is_naked_constructor: is_constructor = True @@ -170,7 +173,7 @@ def fromString(function_string): if overridden_name[-1] == "\"": overridden_name += "_s" - return BuiltinFunction(function_name, function_source, parameters, is_async, is_constructor, is_link_time_constant, is_naked_constructor, intrinsic, visibility, overridden_name) + return BuiltinFunction(function_name, function_source, parameters, is_async, is_constructor, is_link_time_constant, is_naked_constructor, is_always_inline, intrinsic, visibility, overridden_name) def __str__(self): interface = "%s(%s)" % (self.function_name, ', '.join(self.parameters)) diff --git a/vendor/webkit/Source/JavaScriptCore/Scripts/wkbuiltins/builtins_templates.py b/vendor/webkit/Source/JavaScriptCore/Scripts/wkbuiltins/builtins_templates.py index 4e1fc907..63b26160 100644 --- a/vendor/webkit/Source/JavaScriptCore/Scripts/wkbuiltins/builtins_templates.py +++ b/vendor/webkit/Source/JavaScriptCore/Scripts/wkbuiltins/builtins_templates.py @@ -168,7 +168,7 @@ class BuiltinsGeneratorTemplates: JSC::Identifier executableName = functionName##PublicName();\\ if (overriddenName)\\ executableName = JSC::Identifier::fromString(m_vm, overriddenName);\\ - m_##name##Executable = JSC::Weak(JSC::createBuiltinExecutable(m_vm, m_##name##Source, executableName, s_##name##ImplementationVisibility, s_##name##ConstructorKind, s_##name##ConstructAbility), 
this, &m_##name##Executable);\\ + m_##name##Executable = JSC::Weak(JSC::createBuiltinExecutable(m_vm, m_##name##Source, executableName, s_##name##ImplementationVisibility, s_##name##ConstructorKind, s_##name##ConstructAbility, s_##name##InlineAttribute), this, &m_##name##Executable);\\ }\\ return m_##name##Executable.get();\\ } diff --git a/vendor/webkit/Source/JavaScriptCore/Sources.txt b/vendor/webkit/Source/JavaScriptCore/Sources.txt index b6131a97..63ebb5c1 100644 --- a/vendor/webkit/Source/JavaScriptCore/Sources.txt +++ b/vendor/webkit/Source/JavaScriptCore/Sources.txt @@ -55,7 +55,6 @@ assembler/MacroAssembler.cpp assembler/MacroAssemblerARM64.cpp @no-unify assembler/MacroAssemblerARMv7.cpp @no-unify assembler/MacroAssemblerCodeRef.cpp -assembler/MacroAssemblerMIPS.cpp @no-unify assembler/MacroAssemblerPrinter.cpp assembler/MacroAssemblerRISCV64.cpp @no-unify assembler/MacroAssemblerX86Common.cpp @no-unify @@ -109,6 +108,7 @@ b3/air/AirStackSlot.cpp b3/air/AirStackSlotKind.cpp b3/air/AirTmp.cpp b3/air/AirTmpWidth.cpp +b3/air/AirTZoneImpls.cpp b3/air/AirValidate.cpp b3/B3ArgumentRegValue.cpp @@ -176,6 +176,7 @@ b3/B3StackmapSpecial.cpp b3/B3StackmapValue.cpp b3/B3SwitchCase.cpp b3/B3SwitchValue.cpp +b3/B3TZoneImpls.cpp b3/B3Type.cpp b3/B3UpsilonValue.cpp b3/B3UseCounts.cpp @@ -210,6 +211,7 @@ bytecode/BytecodeRewriter.cpp bytecode/BytecodeUseDef.cpp bytecode/CallEdge.cpp bytecode/CallLinkInfo.cpp +bytecode/CallLinkInfoBase.cpp bytecode/CallLinkStatus.cpp bytecode/CallMode.cpp bytecode/CallVariant.cpp @@ -234,6 +236,7 @@ bytecode/ExitFlag.cpp bytecode/ExitKind.cpp bytecode/ExitingInlineKind.cpp bytecode/ExitingJITType.cpp +bytecode/ExpressionInfo.cpp bytecode/FullCodeOrigin.cpp bytecode/FunctionCodeBlock.cpp bytecode/GetByStatus.cpp @@ -435,6 +438,7 @@ dfg/DFGToFTLDeferredCompilationCallback.cpp dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp dfg/DFGTransition.cpp dfg/DFGTypeCheckHoistingPhase.cpp +dfg/DFGTZoneImpls.cpp dfg/DFGUnificationPhase.cpp dfg/DFGUseKind.cpp dfg/DFGValidate.cpp @@ -700,6 +704,7 @@ jit/SIMDInfo.cpp jit/SlowPathCall.cpp jit/TagRegistersMode.cpp jit/ThunkGenerators.cpp +jit/JITTZoneImpls.cpp jit/Width.cpp llint/InPlaceInterpreter.cpp @@ -737,6 +742,7 @@ profiler/ProfilerOrigin.cpp profiler/ProfilerOriginStack.cpp profiler/ProfilerProfiledBytecodes.cpp profiler/ProfilerUID.cpp +profiler/ProfilerTZoneImpls.cpp runtime/AbstractModuleRecord.cpp runtime/AggregateError.cpp @@ -1014,7 +1020,9 @@ runtime/RegExpMatchesArray.cpp runtime/RegExpObject.cpp runtime/RegExpPrototype.cpp runtime/RegExpStringIteratorPrototype.cpp +runtime/RegisterTZoneTypes.cpp runtime/ResourceExhaustion.cpp +runtime/RuntimeTZoneImpls.cpp runtime/RuntimeType.cpp runtime/SamplingCounter.cpp runtime/SamplingProfiler.cpp @@ -1025,6 +1033,7 @@ runtime/ScriptExecutable.cpp runtime/SetConstructor.cpp runtime/SetIteratorPrototype.cpp runtime/SetPrototype.cpp +runtime/SideDataRepository.cpp runtime/SimpleTypedArrayController.cpp runtime/SmallStrings.cpp runtime/SparseArrayValueMap.cpp @@ -1045,6 +1054,7 @@ runtime/SymbolObject.cpp runtime/SymbolPrototype.cpp runtime/SymbolTable.cpp runtime/SyntheticModuleRecord.cpp +runtime/TZoneInit.cpp runtime/TemplateObjectDescriptor.cpp runtime/TemporalCalendar.cpp runtime/TemporalCalendarConstructor.cpp @@ -1105,12 +1115,15 @@ tools/VMInspector.cpp wasm/WasmB3IRGenerator.cpp wasm/WasmBBQDisassembler.cpp wasm/WasmBBQJIT.cpp @no-unify +wasm/WasmBBQJIT32_64.cpp @no-unify +wasm/WasmBBQJIT64.cpp @no-unify wasm/WasmBBQPlan.cpp wasm/WasmBinding.cpp 
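// Note: the *TZoneImpls.cpp files added throughout this Sources.txt hunk (under b3/, dfg/, jit/, profiler/, runtime/, wasm/, and yarr/) presumably hold the out-of-line WTF_MAKE_TZONE_ALLOCATED_IMPL definitions backing the WTF_MAKE_FAST_ALLOCATED -> WTF_MAKE_TZONE_ALLOCATED migration that shows up in AssemblyComments.* and LinkBuffer.* later in this patch.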
wasm/WasmBranchHintsSectionParser.cpp wasm/WasmCallee.cpp wasm/WasmCalleeGroup.cpp wasm/WasmCallingConvention.cpp +wasm/WasmCompilationContext.cpp wasm/WasmCompilationMode.cpp wasm/WasmConstExprGenerator.cpp wasm/WasmContext.cpp @@ -1126,6 +1139,7 @@ wasm/WasmInstance.cpp wasm/WasmInstance.h wasm/WasmIPIntGenerator.cpp wasm/WasmIPIntPlan.cpp +wasm/WasmIPIntSlowPaths.cpp @no-unify wasm/WasmLLIntGenerator.cpp wasm/WasmLLIntPlan.cpp wasm/WasmLLIntTierUpCounter.cpp @@ -1144,7 +1158,7 @@ wasm/WasmCallsiteCollection.cpp wasm/WasmSectionParser.cpp wasm/WasmTypeDefinition.cpp wasm/WasmOpcodeCounter.cpp -wasm/WasmSlowPaths.cpp +wasm/WasmSlowPaths.cpp @no-unify wasm/WasmStreamingCompiler.cpp wasm/WasmStreamingParser.cpp wasm/WasmStreamingPlan.cpp @@ -1152,6 +1166,7 @@ wasm/WasmTable.cpp wasm/WasmTag.cpp wasm/WasmThunks.cpp wasm/WasmTierUpCount.cpp +wasm/WasmTZoneImpls.cpp wasm/WasmValueLocation.cpp wasm/WasmWorklist.cpp @@ -1213,6 +1228,7 @@ yarr/YarrJIT.cpp yarr/YarrPattern.cpp yarr/YarrSyntaxChecker.cpp yarr/YarrUnicodeProperties.cpp +yarr/YarrTZoneImpls.cpp // Derived Sources yarr/YarrCanonicalizeUnicode.cpp diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/ARM64Assembler.h b/vendor/webkit/Source/JavaScriptCore/assembler/ARM64Assembler.h index 0e4daf0e..9b00c2c9 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/ARM64Assembler.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/ARM64Assembler.h @@ -264,7 +264,7 @@ class ARM64Assembler { // (HS, LO, HI, LS) -> (AE, B, A, BE) // (VS, VC) -> (O, NO) - typedef enum { + typedef enum : uint8_t { ConditionEQ, ConditionNE, ConditionHS, ConditionCS = ConditionHS, @@ -313,7 +313,7 @@ class ARM64Assembler { #define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 4) | (index)) #define JUMP_ENUM_SIZE(jump) ((jump) >> 4) - enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0), + enum JumpType : uint8_t { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0), JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)), JumpCondition = JUMP_ENUM_WITH_SIZE(2, 2 * sizeof(uint32_t)), JumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)), @@ -334,9 +334,33 @@ class ARM64Assembler { LinkJumpTestBitDirect = JUMP_ENUM_WITH_SIZE(7, 1 * sizeof(uint32_t)), }; + enum BranchType : uint8_t { + BranchType_JMP, + BranchType_CALL, + BranchType_RET + }; + + enum class ThunkOrNot : uint8_t { + NotThunk = false, + Thunk = true, + }; + class LinkRecord { public: - LinkRecord(const ARM64Assembler* assembler, intptr_t from, intptr_t to, JumpType type, Condition condition) + LinkRecord(const ARM64Assembler* assembler, intptr_t from, intptr_t to, ThunkOrNot isThunk) + { + data.realTypes.m_from = from; +#if CPU(ARM64E) + data.realTypes.m_to = tagInt(to, static_cast(from ^ bitwise_cast(assembler))); +#else + UNUSED_PARAM(assembler); + data.realTypes.m_to = to; +#endif + data.realTypes.m_isThunk = isThunk; + data.realTypes.m_branchType = BranchType_CALL; + } + + LinkRecord(const ARM64Assembler* assembler, intptr_t from, intptr_t to, JumpType type, Condition condition, ThunkOrNot isThunk) { data.realTypes.m_from = from; #if CPU(ARM64E) @@ -346,10 +370,10 @@ class ARM64Assembler { data.realTypes.m_to = to; #endif data.realTypes.m_type = type; - data.realTypes.m_linkType = LinkInvalid; data.realTypes.m_condition = condition; + data.realTypes.m_isThunk = isThunk; } - LinkRecord(const ARM64Assembler* assembler, intptr_t from, intptr_t to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister) + LinkRecord(const ARM64Assembler* assembler, intptr_t from, 
intptr_t to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister, ThunkOrNot isThunk) { data.realTypes.m_from = from; #if CPU(ARM64E) @@ -359,12 +383,12 @@ class ARM64Assembler { data.realTypes.m_to = to; #endif data.realTypes.m_type = type; - data.realTypes.m_linkType = LinkInvalid; data.realTypes.m_condition = condition; data.realTypes.m_is64Bit = is64Bit; + data.realTypes.m_isThunk = isThunk; data.realTypes.m_compareRegister = compareRegister; } - LinkRecord(const ARM64Assembler* assembler, intptr_t from, intptr_t to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister) + LinkRecord(const ARM64Assembler* assembler, intptr_t from, intptr_t to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister, ThunkOrNot isThunk) { data.realTypes.m_from = from; #if CPU(ARM64E) @@ -374,9 +398,9 @@ class ARM64Assembler { data.realTypes.m_to = to; #endif data.realTypes.m_type = type; - data.realTypes.m_linkType = LinkInvalid; data.realTypes.m_condition = condition; data.realTypes.m_bitNumber = bitNumber; + data.realTypes.m_isThunk = isThunk; data.realTypes.m_compareRegister = compareRegister; } // We are defining a copy constructor and assignment operator @@ -412,24 +436,28 @@ class ARM64Assembler { } JumpType type() const { return data.realTypes.m_type; } JumpLinkType linkType() const { return data.realTypes.m_linkType; } + BranchType branchType() const { return data.realTypes.m_branchType; } void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; } Condition condition() const { return data.realTypes.m_condition; } bool is64Bit() const { return data.realTypes.m_is64Bit; } + bool isThunk() const { return data.realTypes.m_isThunk == ThunkOrNot::Thunk; } unsigned bitNumber() const { return data.realTypes.m_bitNumber; } RegisterID compareRegister() const { return data.realTypes.m_compareRegister; } private: union { struct RealTypes { - int64_t m_from; - int64_t m_to; - RegisterID m_compareRegister; - JumpType m_type : 8; - JumpLinkType m_linkType : 8; - Condition m_condition : 4; - unsigned m_bitNumber : 6; - bool m_is64Bit : 1; - } realTypes; + int64_t m_from { 0 }; + int64_t m_to { 0 }; + RegisterID m_compareRegister { ARM64Registers::InvalidGPRReg }; + JumpType m_type : 8 { JumpNoCondition }; + JumpLinkType m_linkType : 8 { LinkInvalid }; + Condition m_condition : 4 { ConditionInvalid }; + unsigned m_bitNumber : 6 { 0 }; + bool m_is64Bit : 1 { false }; + ThunkOrNot m_isThunk : 1 { ThunkOrNot::NotThunk }; + BranchType m_branchType : 2 { BranchType_JMP }; + } realTypes { }; struct CopyTypes { uint64_t content[3]; } copyTypes; @@ -529,12 +557,6 @@ class ARM64Assembler { MemOpSize_64 = 3, }; - enum BranchType { - BranchType_JMP, - BranchType_CALL, - BranchType_RET - }; - enum AddOp { AddOp_ADD, AddOp_SUB @@ -2262,20 +2284,33 @@ class ARM64Assembler { } enum BranchTargetType { DirectBranch, IndirectBranch }; - using CopyFunction = void*(&)(void*, const void*, size_t); - template + template ALWAYS_INLINE static void fillNops(void* base, size_t size) { RELEASE_ASSERT(!(size % sizeof(int32_t))); size_t n = size / sizeof(int32_t); - for (int32_t* ptr = static_cast(base); n--;) { + int32_t* ptr = static_cast(base); + RELEASE_ASSERT(roundUpToMultipleOf(ptr) == ptr); + for (; n--;) { int insn = nopPseudo(); - RELEASE_ASSERT(roundUpToMultipleOf(ptr) == ptr); - copy(ptr++, &insn, sizeof(int)); + machineCodeCopy(ptr++, &insn, sizeof(int)); } } - + + template + 
ALWAYS_INLINE static void fillNearTailCall(void* from, void* to) + { + RELEASE_ASSERT(roundUpToMultipleOf(from) == from); + intptr_t offset = (bitwise_cast(to) - bitwise_cast(from)) >> 2; + ASSERT(static_cast(offset) == offset); + ASSERT(isInt<26>(offset)); + constexpr bool isCall = false; + int insn = unconditionalBranchImmediate(isCall, static_cast(offset)); + machineCodeCopy(from, &insn, sizeof(int)); + cacheFlush(from, sizeof(int)); + } + ALWAYS_INLINE void dmbISH() { insn(0xd5033bbf); @@ -3411,21 +3446,49 @@ class ARM64Assembler { { ASSERT(to.isSet()); ASSERT(from.isSet()); - m_jumpsToLink.append(LinkRecord(this, from.offset(), to.offset(), type, condition)); + m_jumpsToLink.append(LinkRecord(this, from.offset(), to.offset(), type, condition, ThunkOrNot::NotThunk)); } void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister) { ASSERT(to.isSet()); ASSERT(from.isSet()); - m_jumpsToLink.append(LinkRecord(this, from.offset(), to.offset(), type, condition, is64Bit, compareRegister)); + m_jumpsToLink.append(LinkRecord(this, from.offset(), to.offset(), type, condition, is64Bit, compareRegister, ThunkOrNot::NotThunk)); } void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister) { ASSERT(to.isSet()); ASSERT(from.isSet()); - m_jumpsToLink.append(LinkRecord(this, from.offset(), to.offset(), type, condition, bitNumber, compareRegister)); + m_jumpsToLink.append(LinkRecord(this, from.offset(), to.offset(), type, condition, bitNumber, compareRegister, ThunkOrNot::NotThunk)); + } + + void linkJumpThunk(AssemblerLabel from, void* to, JumpType type, Condition condition) + { + ASSERT(to); + ASSERT(from.isSet()); + m_jumpsToLink.append(LinkRecord(this, from.offset(), bitwise_cast(to), type, condition, ThunkOrNot::Thunk)); + } + + void linkJumpThunk(AssemblerLabel from, void* to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister) + { + ASSERT(to); + ASSERT(from.isSet()); + m_jumpsToLink.append(LinkRecord(this, from.offset(), bitwise_cast(to), type, condition, is64Bit, compareRegister, ThunkOrNot::Thunk)); + } + + void linkJumpThunk(AssemblerLabel from, void* to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister) + { + ASSERT(to); + ASSERT(from.isSet()); + m_jumpsToLink.append(LinkRecord(this, from.offset(), bitwise_cast(to), type, condition, bitNumber, compareRegister, ThunkOrNot::Thunk)); + } + + void linkNearCallThunk(AssemblerLabel from, void* to) + { + ASSERT(to); + ASSERT(from.isSet()); + m_jumpsToLink.append(LinkRecord(this, from.offset() - sizeof(int), bitwise_cast(to), ThunkOrNot::Thunk)); } static void linkJump(void* code, AssemblerLabel from, void* to) @@ -3461,7 +3524,7 @@ class ARM64Assembler { #if ENABLE(JUMP_ISLANDS) if (!isInt<26>(offset)) { - to = ExecutableAllocator::singleton().getJumpIslandTo(where, to); + to = ExecutableAllocator::singleton().getJumpIslandToUsingJITMemcpy(where, to); offset = (bitwise_cast(to) - bitwise_cast(where)) >> 2; RELEASE_ASSERT(isInt<26>(offset)); } @@ -3472,7 +3535,13 @@ class ARM64Assembler { performJITMemcpy(where, &insn, sizeof(int)); cacheFlush(where, sizeof(int)); } - + + static void replaceWithNops(void* where, size_t memoryToFillWithNopsInBytes) + { + fillNops(where, memoryToFillWithNopsInBytes); + cacheFlush(where, memoryToFillWithNopsInBytes); + } + static ptrdiff_t maxJumpReplacementSize() { return 4; @@ -3629,64 +3698,77 @@ class 
ARM64Assembler { static bool canCompact(JumpType jumpType) { // Fixed jumps cannot be compacted + // Keep in mind that nearCall and tailCall are encoded as JumpNoCondition. return (jumpType == JumpNoCondition) || (jumpType == JumpCondition) || (jumpType == JumpCompareAndBranch) || (jumpType == JumpTestBit); } - static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) + static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { - switch (jumpType) { - case JumpFixed: - return LinkInvalid; - case JumpNoConditionFixedSize: - return LinkJumpNoCondition; - case JumpConditionFixedSize: - return LinkJumpCondition; - case JumpCompareAndBranchFixedSize: - return LinkJumpCompareAndBranch; - case JumpTestBitFixedSize: - return LinkJumpTestBit; - case JumpNoCondition: - return LinkJumpNoCondition; - case JumpCondition: { - ASSERT(is4ByteAligned(from)); - ASSERT(is4ByteAligned(to)); - intptr_t relative = reinterpret_cast(to) - (reinterpret_cast(from)); - - if (isInt<21>(relative)) - return LinkJumpConditionDirect; - - return LinkJumpCondition; - } - case JumpCompareAndBranch: { - ASSERT(is4ByteAligned(from)); - ASSERT(is4ByteAligned(to)); - intptr_t relative = reinterpret_cast(to) - (reinterpret_cast(from)); - - if (isInt<21>(relative)) - return LinkJumpCompareAndBranchDirect; - - return LinkJumpCompareAndBranch; - } - case JumpTestBit: { - ASSERT(is4ByteAligned(from)); - ASSERT(is4ByteAligned(to)); - intptr_t relative = reinterpret_cast(to) - (reinterpret_cast(from)); - - if (isInt<14>(relative)) - return LinkJumpTestBitDirect; - - return LinkJumpTestBit; - } - default: - ASSERT_NOT_REACHED(); - } + auto computeJumpType = [&](const uint8_t* from, const uint8_t* to) -> JumpLinkType { + auto jumpType = record.type(); + switch (jumpType) { + case JumpFixed: + return LinkInvalid; + case JumpNoConditionFixedSize: + return LinkJumpNoCondition; + case JumpConditionFixedSize: + return LinkJumpCondition; + case JumpCompareAndBranchFixedSize: + return LinkJumpCompareAndBranch; + case JumpTestBitFixedSize: + return LinkJumpTestBit; + case JumpNoCondition: + return LinkJumpNoCondition; + case JumpCondition: { + ASSERT(is4ByteAligned(from)); + ASSERT(is4ByteAligned(to)); + intptr_t relative = reinterpret_cast(to) - (reinterpret_cast(from)); + if (record.isThunk()) { + int32_t delta = jumpSizeDelta(jumpType, LinkJumpConditionDirect); + relative += delta; + } + + if (isInt<21>(relative)) + return LinkJumpConditionDirect; + + return LinkJumpCondition; + } + case JumpCompareAndBranch: { + ASSERT(is4ByteAligned(from)); + ASSERT(is4ByteAligned(to)); + intptr_t relative = reinterpret_cast(to) - (reinterpret_cast(from)); + if (record.isThunk()) { + int32_t delta = jumpSizeDelta(jumpType, LinkJumpCompareAndBranchDirect); + relative += delta; + } + + if (isInt<21>(relative)) + return LinkJumpCompareAndBranchDirect; + + return LinkJumpCompareAndBranch; + } + case JumpTestBit: { + ASSERT(is4ByteAligned(from)); + ASSERT(is4ByteAligned(to)); + intptr_t relative = reinterpret_cast(to) - (reinterpret_cast(from)); + if (record.isThunk()) { + int32_t delta = jumpSizeDelta(jumpType, LinkJumpTestBitDirect); + relative += delta; + } + + if (isInt<14>(relative)) + return LinkJumpTestBitDirect; + + return LinkJumpTestBit; + } + default: + ASSERT_NOT_REACHED(); + } - return LinkJumpNoCondition; - } + return LinkJumpNoCondition; + }; - static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) - { - JumpLinkType linkType = 
computeJumpType(record.type(), from, to); + JumpLinkType linkType = computeJumpType(from, to); record.setLinkType(linkType); return linkType; } @@ -3699,14 +3781,25 @@ class ARM64Assembler { return m_jumpsToLink; } - template + template static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction8, uint8_t* to) { const int* fromInstruction = reinterpret_cast(fromInstruction8); switch (record.linkType()) { - case LinkJumpNoCondition: - linkJumpOrCall(reinterpret_cast(from), fromInstruction, to); + case LinkJumpNoCondition: { + switch (record.branchType()) { + case BranchType_JMP: + linkJumpOrCall(reinterpret_cast(from), fromInstruction, to); + break; + case BranchType_CALL: + linkJumpOrCall(reinterpret_cast(from), fromInstruction, to); + break; + case BranchType_RET: + ASSERT_NOT_REACHED(); + break; + } break; + } case LinkJumpConditionDirect: linkConditionalBranch(record.condition(), reinterpret_cast(from), fromInstruction, to); break; @@ -3773,7 +3866,7 @@ class ARM64Assembler { setPointer(address, valuePtr, rd, flush); } - template + template static void linkJumpOrCall(int* from, const int* fromInstruction, void* to) { static_assert(type == BranchType_JMP || type == BranchType_CALL); @@ -3794,7 +3887,10 @@ class ARM64Assembler { #if ENABLE(JUMP_ISLANDS) if (!isInt<26>(offset)) { - to = ExecutableAllocator::singleton().getJumpIslandTo(bitwise_cast(fromInstruction), to); + if constexpr (copy == MachineCodeCopyMode::JITMemcpy) + to = ExecutableAllocator::singleton().getJumpIslandToUsingJITMemcpy(bitwise_cast(fromInstruction), to); + else + to = ExecutableAllocator::singleton().getJumpIslandToUsingMemcpy(bitwise_cast(fromInstruction), to); offset = (bitwise_cast(to) - bitwise_cast(fromInstruction)) >> 2; RELEASE_ASSERT(isInt<26>(offset)); } @@ -3802,12 +3898,13 @@ class ARM64Assembler { int insn = unconditionalBranchImmediate(isCall, static_cast(offset)); RELEASE_ASSERT(roundUpToMultipleOf(from) == from); - copy(from, &insn, sizeof(int)); + machineCodeCopy(from, &insn, sizeof(int)); } - template + template static void linkCompareAndBranch(Condition condition, bool is64Bit, RegisterID rt, int* from, const int* fromInstruction, void* to) { + RELEASE_ASSERT(roundUpToMultipleOf(from) == from); ASSERT(!(reinterpret_cast(from) & 3)); ASSERT(!(reinterpret_cast(to) & 3)); intptr_t offset = (reinterpret_cast(to) - reinterpret_cast(fromInstruction)) >> 2; @@ -3818,24 +3915,22 @@ class ARM64Assembler { if (useDirect || type == DirectBranch) { ASSERT(isInt<19>(offset)); int insn = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, condition == ConditionNE, static_cast(offset), rt); - RELEASE_ASSERT(roundUpToMultipleOf(from) == from); - copy(from, &insn, sizeof(int)); + machineCodeCopy(from, &insn, sizeof(int)); if (type == IndirectBranch) { insn = nopPseudo(); - RELEASE_ASSERT(roundUpToMultipleOf(from + 1) == (from + 1)); - copy(from + 1, &insn, sizeof(int)); + machineCodeCopy(from + 1, &insn, sizeof(int)); } } else { int insn = compareAndBranchImmediate(is64Bit ? 
Datasize_64 : Datasize_32, invert(condition) == ConditionNE, 2, rt); - RELEASE_ASSERT(roundUpToMultipleOf(from) == from); - copy(from, &insn, sizeof(int)); + machineCodeCopy(from, &insn, sizeof(int)); linkJumpOrCall(from + 1, fromInstruction + 1, to); } } - template + template static void linkConditionalBranch(Condition condition, int* from, const int* fromInstruction, void* to) { + RELEASE_ASSERT(roundUpToMultipleOf(from) == from); ASSERT(!(reinterpret_cast(from) & 3)); ASSERT(!(reinterpret_cast(to) & 3)); intptr_t offset = (reinterpret_cast(to) - reinterpret_cast(fromInstruction)) >> 2; @@ -3846,24 +3941,22 @@ class ARM64Assembler { if (useDirect || type == DirectBranch) { ASSERT(isInt<19>(offset)); int insn = conditionalBranchImmediate(static_cast(offset), condition); - RELEASE_ASSERT(roundUpToMultipleOf(from) == from); - copy(from, &insn, sizeof(int)); + machineCodeCopy(from, &insn, sizeof(int)); if (type == IndirectBranch) { insn = nopPseudo(); - RELEASE_ASSERT(roundUpToMultipleOf(from + 1) == (from + 1)); - copy(from + 1, &insn, sizeof(int)); + machineCodeCopy(from + 1, &insn, sizeof(int)); } } else { int insn = conditionalBranchImmediate(2, invert(condition)); - RELEASE_ASSERT(roundUpToMultipleOf(from) == from); - copy(from, &insn, sizeof(int)); + machineCodeCopy(from, &insn, sizeof(int)); linkJumpOrCall(from + 1, fromInstruction + 1, to); } } - template + template static void linkTestAndBranch(Condition condition, unsigned bitNumber, RegisterID rt, int* from, const int* fromInstruction, void* to) { + RELEASE_ASSERT(roundUpToMultipleOf(from) == from); ASSERT(!(reinterpret_cast(from) & 3)); ASSERT(!(reinterpret_cast(to) & 3)); intptr_t offset = (reinterpret_cast(to) - reinterpret_cast(fromInstruction)) >> 2; @@ -3875,17 +3968,14 @@ class ARM64Assembler { if (useDirect || type == DirectBranch) { ASSERT(isInt<14>(offset)); int insn = testAndBranchImmediate(condition == ConditionNE, static_cast(bitNumber), static_cast(offset), rt); - RELEASE_ASSERT(roundUpToMultipleOf(from) == from); - copy(from, &insn, sizeof(int)); + machineCodeCopy(from, &insn, sizeof(int)); if (type == IndirectBranch) { insn = nopPseudo(); - RELEASE_ASSERT(roundUpToMultipleOf(from + 1) == (from + 1)); - copy(from + 1, &insn, sizeof(int)); + machineCodeCopy(from + 1, &insn, sizeof(int)); } } else { int insn = testAndBranchImmediate(invert(condition) == ConditionNE, static_cast(bitNumber), 2, rt); - RELEASE_ASSERT(roundUpToMultipleOf(from) == from); - copy(from, &insn, sizeof(int)); + machineCodeCopy(from, &insn, sizeof(int)); linkJumpOrCall(from + 1, fromInstruction + 1, to); } } diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/ARMv7Assembler.h b/vendor/webkit/Source/JavaScriptCore/assembler/ARMv7Assembler.h index e72839fb..df1c2801 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/ARMv7Assembler.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/ARMv7Assembler.h @@ -459,12 +459,13 @@ class ARMv7Assembler { return *this; } intptr_t from() const { return data.realTypes.m_from; } - void setFrom(intptr_t from) { data.realTypes.m_from = from; } - intptr_t to() const { return data.realTypes.m_to; } + void setFrom(const ARMv7Assembler*, intptr_t from) { data.realTypes.m_from = from; } + intptr_t to(const ARMv7Assembler*) const { return data.realTypes.m_to; } JumpType type() const { return data.realTypes.m_type; } JumpLinkType linkType() const { return data.realTypes.m_linkType; } void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = 
linkType; } Condition condition() const { return data.realTypes.m_condition; } + bool isThunk() const { return false; } private: union { struct RealTypes { @@ -571,6 +572,7 @@ class ARMv7Assembler { OP_BKPT = 0xBE00, OP_IT = 0xBF00, OP_NOP_T1 = 0xBF00, + OP_UDF = 0xDE00 } OpcodeID; typedef enum { @@ -1008,6 +1010,11 @@ class ARMv7Assembler { m_formatter.oneWordOp8Imm8(OP_BKPT, imm); } + void udf(uint8_t imm = 0) + { + m_formatter.oneWordOp8Imm8(OP_UDF, imm); + } + static bool isBkpt(void* address) { unsigned short expected = OP_BKPT; @@ -1118,7 +1125,7 @@ class ARMv7Assembler { // Manual ARMv7-A and ARMv7-R edition available on // https://static.docs.arm.com/ddi0406/cd/DDI0406C_d_armv7ar_arm.pdf m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt); - } else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10()) + } else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10() && !(imm.getUInt10() % 4)) m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, static_cast(imm.getUInt10() >> 2)); else m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12()); @@ -1134,6 +1141,7 @@ class ARMv7Assembler { { ASSERT(rn != ARMRegisters::pc); // LDR (literal) ASSERT(imm.isUInt7()); + ASSERT(!(imm.getUInt7() % 4)); ASSERT(!((rt | rn) & 8)); m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt); } @@ -1736,9 +1744,9 @@ class ARMv7Assembler { ASSERT(rn != ARMRegisters::pc); ASSERT(imm.isUInt12()); - if (!((rt | rn) & 8) && imm.isUInt7()) + if (!((rt | rn) & 8) && imm.isUInt7() && !(imm.getUInt7() & 0x3)) m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt); - else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10()) + else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10() && !(imm.getUInt10() & 0x3)) m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, static_cast(imm.getUInt10() >> 2)); else m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12()); @@ -1856,7 +1864,7 @@ class ARMv7Assembler { ASSERT(rn != ARMRegisters::pc); ASSERT(imm.isUInt12()); - if (!((rt | rn) & 8) && imm.isUInt6()) + if (!((rt | rn) & 8) && imm.isUInt6() && !(imm.getUInt6() & 0x1)) m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt6() >> 1, rn, rt); else m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12()); @@ -2416,9 +2424,7 @@ class ARMv7Assembler { return OP_NOP_T2a | (OP_NOP_T2b << 16); } - using CopyFunction = void*(&)(void*, const void*, size_t); - - template + template ALWAYS_INLINE static void fillNops(void* base, size_t size) { RELEASE_ASSERT(!(size % sizeof(int16_t))); @@ -2427,7 +2433,7 @@ class ARMv7Assembler { const size_t num32s = size / sizeof(int32_t); for (size_t i = 0; i < num32s; i++) { const int32_t insn = nopPseudo32(); - copy(ptr, &insn, sizeof(int32_t)); + machineCodeCopy(ptr, &insn, sizeof(int32_t)); ptr += sizeof(int32_t); } @@ -2436,10 +2442,18 @@ class ARMv7Assembler { ASSERT(num16s * sizeof(int16_t) + num32s * sizeof(int32_t) == size); if (num16s) { const int16_t insn = nopPseudo16(); - copy(ptr, &insn, sizeof(int16_t)); + machineCodeCopy(ptr, &insn, sizeof(int16_t)); } } + template + ALWAYS_INLINE static void fillNearTailCall(void* from, void* to) + { + uint16_t* ptr = reinterpret_cast(from) + 2; + linkJumpT4(ptr, ptr, to, BranchWithLink::No); + cacheFlush(from, sizeof(uint16_t) * 2); + } + void dmbSY() { m_formatter.twoWordOp16Op16(OP_DMB_T1a, OP_DMB_SY_T1b); @@ -2574,7 +2588,7 @@ class ARMv7Assembler { return m_jumpsToLink; } - 
template + template static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction8, uint8_t* to) { const uint16_t* fromInstruction = reinterpret_cast_ptr(fromInstruction8); @@ -2756,7 +2770,13 @@ class ARMv7Assembler { cacheFlush(ptr - 2, sizeof(uint16_t) * 2); #endif } - + + static void replaceWithNops(void* instructionStart, size_t memoryToFillWithNopsInBytes) + { + fillNops(instructionStart, memoryToFillWithNopsInBytes); + cacheFlush(instructionStart, memoryToFillWithNopsInBytes); + } + static ptrdiff_t maxJumpReplacementSize() { #if OS(LINUX) @@ -3031,7 +3051,7 @@ class ARMv7Assembler { return ((relative << 7) >> 7) == relative; } - template + template static void linkJumpT1(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. :-( @@ -3048,10 +3068,10 @@ class ARMv7Assembler { // All branch offsets should be an even distance. ASSERT(!(relative & 1)); uint16_t newInstruction = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1); - copy(writeTarget - 1, &newInstruction, sizeof(uint16_t)); + machineCodeCopy(writeTarget - 1, &newInstruction, sizeof(uint16_t)); } - template + template static void linkJumpT2(uint16_t* writeTarget, const uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. :-( @@ -3068,10 +3088,10 @@ class ARMv7Assembler { // All branch offsets should be an even distance. ASSERT(!(relative & 1)); uint16_t newInstruction = OP_B_T2 | ((relative & 0xffe) >> 1); - copy(writeTarget - 1, &newInstruction, sizeof(uint16_t)); + machineCodeCopy(writeTarget - 1, &newInstruction, sizeof(uint16_t)); } - template + template static void linkJumpT3(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. :-( @@ -3086,10 +3106,10 @@ class ARMv7Assembler { uint16_t instructions[2]; instructions[0] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12); instructions[1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1); - copy(writeTarget - 2, instructions, 2 * sizeof(uint16_t)); + machineCodeCopy(writeTarget - 2, instructions, 2 * sizeof(uint16_t)); } - template + template static void linkJumpT4(uint16_t* writeTarget, const uint16_t* instruction, void* target, BranchWithLink link) { // FIMXE: this should be up in the MacroAssembler layer. :-( @@ -3107,10 +3127,10 @@ class ARMv7Assembler { uint16_t instructions[2]; instructions[0] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12); instructions[1] = OP_B_T4b | (static_cast(link) << 14) | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1); - copy(writeTarget - 2, instructions, 2 * sizeof(uint16_t)); + machineCodeCopy(writeTarget - 2, instructions, 2 * sizeof(uint16_t)); } - template + template static void linkConditionalJumpT4(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. 
:-( @@ -3118,11 +3138,11 @@ class ARMv7Assembler { ASSERT(!(reinterpret_cast(target) & 1)); uint16_t newInstruction = ifThenElse(cond) | OP_IT; - copy(writeTarget - 3, &newInstruction, sizeof(uint16_t)); + machineCodeCopy(writeTarget - 3, &newInstruction, sizeof(uint16_t)); linkJumpT4(writeTarget, instruction, target, BranchWithLink::No); } - template + template static void linkBX(uint16_t* writeTarget, const uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. :-( @@ -3140,10 +3160,10 @@ class ARMv7Assembler { instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16); instructions[4] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3); - copy(writeTarget - 5, instructions, 5 * sizeof(uint16_t)); + machineCodeCopy(writeTarget - 5, instructions, 5 * sizeof(uint16_t)); } - template + template static void linkConditionalBX(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. :-( @@ -3152,7 +3172,7 @@ class ARMv7Assembler { linkBX(writeTarget, instruction, target); uint16_t newInstruction = ifThenElse(cond, true, true) | OP_IT; - copy(writeTarget - 6, &newInstruction, sizeof(uint16_t)); + machineCodeCopy(writeTarget - 6, &newInstruction, sizeof(uint16_t)); } static void linkJumpAbsolute(uint16_t* writeTarget, const uint16_t* instruction, void* target) @@ -3201,7 +3221,7 @@ class ARMv7Assembler { intptr_t offset = bitwise_cast(to) - bitwise_cast(fromInstruction); #if ENABLE(JUMP_ISLANDS) if (!isInt<25>(offset)) { - to = ExecutableAllocator::singleton().getJumpIslandTo(bitwise_cast(fromInstruction), to); + to = ExecutableAllocator::singleton().getJumpIslandToUsingJITMemcpy(bitwise_cast(fromInstruction), to); offset = bitwise_cast(to) - bitwise_cast(fromInstruction); } #endif diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h b/vendor/webkit/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h index f83f6958..dbc0f8b1 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -60,7 +61,7 @@ struct OSRExit; #define JIT_COMMENT(jit, ...) 
do { if (UNLIKELY(Options::needDisassemblySupport())) { (jit).comment(__VA_ARGS__); } else { (void) jit; } } while (0) class AbstractMacroAssemblerBase { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(AbstractMacroAssemblerBase); public: enum StatusCondition { Success, @@ -435,7 +436,7 @@ class AbstractMacroAssembler : public AbstractMacroAssemblerBase { masm->invalidateAllTempRegisters(); } - friend bool operator==(Label, Label) = default; + friend bool operator==(const Label&, const Label&) = default; bool isSet() const { return m_label.isSet(); } private: @@ -575,7 +576,7 @@ class AbstractMacroAssembler : public AbstractMacroAssemblerBase { { } - bool isFlagSet(Flags flag) + bool isFlagSet(Flags flag) const { return m_flags & flag; } @@ -585,6 +586,24 @@ class AbstractMacroAssembler : public AbstractMacroAssemblerBase { return Call(jump.m_label, Linkable); } + template + void linkThunk(CodeLocationLabel label, AbstractMacroAssemblerType* masm) const + { + ASSERT(isFlagSet(Near)); + ASSERT(isFlagSet(Linkable)); +#if CPU(ARM64) + if (isFlagSet(Tail)) + masm->m_assembler.linkJumpThunk(m_label, label.dataLocation(), ARM64Assembler::JumpNoCondition, ARM64Assembler::ConditionInvalid); + else + masm->m_assembler.linkNearCallThunk(m_label, label.dataLocation()); +#else + Call target = *this; + masm->addLinkTask([=](auto& linkBuffer) { + linkBuffer.link(target, label); + }); +#endif + } + AssemblerLabel m_label; private: Flags m_flags; @@ -632,9 +651,9 @@ class AbstractMacroAssembler : public AbstractMacroAssemblerBase { Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister) : m_label(jmp) + , m_bitNumber(bitNumber) , m_type(type) , m_condition(condition) - , m_bitNumber(bitNumber) , m_compareRegister(compareRegister) { ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize)); @@ -695,6 +714,24 @@ class AbstractMacroAssembler : public AbstractMacroAssemblerBase { #endif } + template + void linkThunk(CodeLocationLabel label, AbstractMacroAssemblerType* masm) const + { +#if CPU(ARM64) + if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize)) + masm->m_assembler.linkJumpThunk(m_label, label.dataLocation(), m_type, m_condition, m_is64Bit, m_compareRegister); + else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize)) + masm->m_assembler.linkJumpThunk(m_label, label.dataLocation(), m_type, m_condition, m_bitNumber, m_compareRegister); + else + masm->m_assembler.linkJumpThunk(m_label, label.dataLocation(), m_type, m_condition); +#else + Jump target = *this; + masm->addLinkTask([=](auto& linkBuffer) { + linkBuffer.link(target, label); + }); +#endif + } + bool isSet() const { return m_label.isSet(); } private: @@ -703,10 +740,10 @@ class AbstractMacroAssembler : public AbstractMacroAssemblerBase { ARMv7Assembler::JumpType m_type { ARMv7Assembler::JumpNoCondition }; ARMv7Assembler::Condition m_condition { ARMv7Assembler::ConditionInvalid }; #elif CPU(ARM64) + unsigned m_bitNumber { 0 }; ARM64Assembler::JumpType m_type { ARM64Assembler::JumpNoCondition }; ARM64Assembler::Condition m_condition { ARM64Assembler::ConditionInvalid }; bool m_is64Bit { false }; - unsigned m_bitNumber { 0 }; ARM64Assembler::RegisterID m_compareRegister { ARM64Registers::InvalidGPRReg }; #endif }; @@ -723,6 +760,12 @@ class AbstractMacroAssembler : public AbstractMacroAssemblerBase { 
operator Jump&() { return m_jump; } + template + void linkThunk(CodeLocationLabel label, AbstractMacroAssemblerType* masm) const + { + m_jump.linkThunk(label, masm); + } + Jump m_jump; }; @@ -751,9 +794,15 @@ class AbstractMacroAssembler : public AbstractMacroAssemblerBase { void linkTo(Label label, AbstractMacroAssemblerType* masm) const { - size_t size = m_jumps.size(); - for (size_t i = 0; i < size; ++i) - m_jumps[i].linkTo(label, masm); + for (auto& jump : m_jumps) + jump.linkTo(label, masm); + } + + template + void linkThunk(CodeLocationLabel label, AbstractMacroAssemblerType* masm) const + { + for (auto& jump : m_jumps) + jump.linkThunk(label, masm); } void append(Jump jump) @@ -988,7 +1037,7 @@ class AbstractMacroAssembler : public AbstractMacroAssemblerBase { size_t startCodeSize = buffer.codeSize(); size_t targetCodeSize = startCodeSize + memoryToFillWithNopsInBytes; buffer.ensureSpace(memoryToFillWithNopsInBytes); - AssemblerType::template fillNops(static_cast(buffer.data()) + startCodeSize, memoryToFillWithNopsInBytes); + AssemblerType::template fillNops(static_cast(buffer.data()) + startCodeSize, memoryToFillWithNopsInBytes); buffer.setCodeSize(targetCodeSize); #endif } diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/AssemblerCommon.h b/vendor/webkit/Source/JavaScriptCore/assembler/AssemblerCommon.h index 50531b15..316712e9 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/AssemblerCommon.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/AssemblerCommon.h @@ -313,4 +313,20 @@ ALWAYS_INLINE bool isValidARMThumb2Immediate(int64_t value) return false; } +enum class MachineCodeCopyMode : uint8_t { + Memcpy, + JITMemcpy, +}; + +static void* performJITMemcpy(void *dst, const void *src, size_t n); + +template +ALWAYS_INLINE void* machineCodeCopy(void *dst, const void *src, size_t n) +{ + if constexpr (copy == MachineCodeCopyMode::Memcpy) + return memcpy(dst, src, n); + else + return performJITMemcpy(dst, src, n); +} + } // namespace JSC. diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/AssemblyComments.cpp b/vendor/webkit/Source/JavaScriptCore/assembler/AssemblyComments.cpp index ffb1fea1..c38a9a31 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/AssemblyComments.cpp +++ b/vendor/webkit/Source/JavaScriptCore/assembler/AssemblyComments.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Apple Inc. All rights reserved. + * Copyright (C) 2022-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -27,11 +27,14 @@ #include "AssemblyComments.h" #include +#include namespace JSC { static LazyNeverDestroyed commentsRegistry; +WTF_MAKE_TZONE_ALLOCATED_IMPL(AssemblyCommentRegistry); + void AssemblyCommentRegistry::initialize() { commentsRegistry.construct(); diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/AssemblyComments.h b/vendor/webkit/Source/JavaScriptCore/assembler/AssemblyComments.h index 0ad19d8d..89f2b275 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/AssemblyComments.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/AssemblyComments.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Apple Inc. All rights reserved. + * Copyright (C) 2022-2023 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,12 +31,13 @@ #include #include #include +#include #include namespace JSC { class AssemblyCommentRegistry { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(AssemblyCommentRegistry); WTF_MAKE_NONCOPYABLE(AssemblyCommentRegistry); public: static AssemblyCommentRegistry& singleton(); diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/CPU.h b/vendor/webkit/Source/JavaScriptCore/assembler/CPU.h index bad4fec4..fdc487a3 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/CPU.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/CPU.h @@ -25,6 +25,7 @@ #pragma once +#include "JSExportMacros.h" #include #include @@ -114,15 +115,6 @@ constexpr bool isX86_64_AVX() } #endif -constexpr bool isMIPS() -{ -#if CPU(MIPS) - return true; -#else - return false; -#endif -} - constexpr bool isRISCV64() { #if CPU(RISCV64) @@ -209,7 +201,7 @@ constexpr size_t prologueStackPointerDelta() #elif CPU(X86_64) // Prologue only saves the framePointerRegister return sizeof(CPURegister); -#elif CPU(ARM_THUMB2) || CPU(ARM64) || CPU(MIPS) || CPU(RISCV64) +#elif CPU(ARM_THUMB2) || CPU(ARM64) || CPU(RISCV64) // Prologue saves the framePointerRegister and linkRegister return 2 * sizeof(CPURegister); #else diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/FastJITPermissions.h b/vendor/webkit/Source/JavaScriptCore/assembler/FastJITPermissions.h index e5c7ff80..0fd60f0d 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/FastJITPermissions.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/FastJITPermissions.h @@ -34,44 +34,97 @@ #include -#if USE(PTHREAD_JIT_PERMISSIONS_API) -#include -#elif USE(APPLE_INTERNAL_SDK) -#include -#endif +#if USE(INLINE_JIT_PERMISSIONS_API) +#include + +static ALWAYS_INLINE bool threadSelfRestrictSupported() +{ + return (&be_memory_inline_jit_restrict_with_witness_supported != nullptr + && !!be_memory_inline_jit_restrict_with_witness_supported()); +} static ALWAYS_INLINE void threadSelfRestrictRWXToRW() { ASSERT(g_jscConfig.useFastJITPermissions); + be_memory_inline_jit_restrict_rwx_to_rw_with_witness(); +} + +static ALWAYS_INLINE void threadSelfRestrictRWXToRX() +{ + ASSERT(g_jscConfig.useFastJITPermissions); + be_memory_inline_jit_restrict_rwx_to_rx_with_witness(); +} -#if USE(PTHREAD_JIT_PERMISSIONS_API) +#elif USE(PTHREAD_JIT_PERMISSIONS_API) +#include + +static ALWAYS_INLINE bool threadSelfRestrictSupported() +{ + return !!pthread_jit_write_protect_supported_np(); +} + +static ALWAYS_INLINE void threadSelfRestrictRWXToRW() +{ + ASSERT(g_jscConfig.useFastJITPermissions); pthread_jit_write_protect_np(false); +} + +static ALWAYS_INLINE void threadSelfRestrictRWXToRX() +{ + ASSERT(g_jscConfig.useFastJITPermissions); + pthread_jit_write_protect_np(true); +} + #elif USE(APPLE_INTERNAL_SDK) +#include + +static ALWAYS_INLINE bool threadSelfRestrictSupported() +{ + return !!os_thread_self_restrict_rwx_is_supported(); +} + +static ALWAYS_INLINE void threadSelfRestrictRWXToRW() +{ + ASSERT(g_jscConfig.useFastJITPermissions); os_thread_self_restrict_rwx_to_rw(); +} + +static ALWAYS_INLINE void threadSelfRestrictRWXToRX() +{ + ASSERT(g_jscConfig.useFastJITPermissions); + os_thread_self_restrict_rwx_to_rx(); +} + #else + +static ALWAYS_INLINE bool threadSelfRestrictSupported() +{ + return false; +} + +static ALWAYS_INLINE void threadSelfRestrictRWXToRW() +{ bool tautologyToIgnoreWarning = true; if (tautologyToIgnoreWarning) 
RELEASE_ASSERT_NOT_REACHED(); -#endif } static ALWAYS_INLINE void threadSelfRestrictRWXToRX() { - ASSERT(g_jscConfig.useFastJITPermissions); - -#if USE(PTHREAD_JIT_PERMISSIONS_API) - pthread_jit_write_protect_np(true); -#elif USE(APPLE_INTERNAL_SDK) - os_thread_self_restrict_rwx_to_rx(); -#else bool tautologyToIgnoreWarning = true; if (tautologyToIgnoreWarning) RELEASE_ASSERT_NOT_REACHED(); -#endif } +#endif + #else // Not OS(DARWIN) && CPU(ARM64) +static ALWAYS_INLINE bool threadSelfRestrictSupported() +{ + return false; +} + NO_RETURN_DUE_TO_CRASH ALWAYS_INLINE void threadSelfRestrictRWXToRW() { CRASH(); diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/LinkBuffer.cpp b/vendor/webkit/Source/JavaScriptCore/assembler/LinkBuffer.cpp index 27c121b6..722be3fc 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/LinkBuffer.cpp +++ b/vendor/webkit/Source/JavaScriptCore/assembler/LinkBuffer.cpp @@ -33,37 +33,86 @@ #include "JITCode.h" #include "Options.h" #include "PerfLog.h" +#include "WasmCallee.h" +#include namespace JSC { size_t LinkBuffer::s_profileCummulativeLinkedSizes[LinkBuffer::numberOfProfiles]; size_t LinkBuffer::s_profileCummulativeLinkedCounts[LinkBuffer::numberOfProfiles]; -LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithoutDisassemblyImpl() +WTF_MAKE_TZONE_ALLOCATED_IMPL(LinkBuffer); + +static const char* profileName(LinkBuffer::Profile profile) +{ +#define RETURN_LINKBUFFER_PROFILE_NAME(name) case LinkBuffer::Profile::name: return #name; + switch (profile) { + FOR_EACH_LINKBUFFER_PROFILE(RETURN_LINKBUFFER_PROFILE_NAME) + } + RELEASE_ASSERT_NOT_REACHED(); +#undef RETURN_LINKBUFFER_PROFILE_NAME + return ""; +} + +LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithoutDisassemblyImpl(ASCIILiteral simpleName) { performFinalization(); ASSERT(m_didAllocate); - if (m_executableMemory) - return CodeRef(*m_executableMemory); - - return CodeRef::createSelfManagedCodeRef(m_code); + CodeRef codeRef(m_executableMemory ? CodeRef(*m_executableMemory) : CodeRef::createSelfManagedCodeRef(m_code)); + + if (UNLIKELY(Options::logJITCodeForPerf())) + logJITCodeForPerf(codeRef, simpleName); + + return codeRef; } -LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithDisassemblyImpl(bool dumpDisassembly, const char* format, ...) 
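Note: the hunk continuing below replaces the inline perf-logging block of finalizeCodeWithDisassemblyImpl with a dedicated logJITCodeForPerf helper, and both finalize paths now accept an ASCIILiteral simpleName. When Options::logJITCodeForPerf() is set, the helper labels the code range by the owner recorded on the LinkBuffer (a CodeBlock for Baseline/DFG/FTL, a Wasm callee for WasmOMG/WasmBBQ), falling back to the caller-supplied simpleName and finally to an "unspecified" placeholder. A minimal sketch of that fallback, restated as a hypothetical free function under the assumption that it matches the dumpSimpleName lambda in the hunk:

    #include <wtf/StringPrintStream.h>
    #include <wtf/text/ASCIILiteral.h>

    // Prints the perf-log label used when the LinkBuffer has no owner UID.
    static void printPerfLogLabel(WTF::StringPrintStream& out, ASCIILiteral simpleName)
    {
        if (simpleName.isNull())
            out.print("unspecified"); // no name supplied by the finalize caller
        else
            out.print(simpleName);
    }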
+void LinkBuffer::logJITCodeForPerf(CodeRef& codeRef, ASCIILiteral simpleName) { - CodeRef result = finalizeCodeWithoutDisassemblyImpl(); - #if OS(LINUX) || OS(DARWIN) - if (Options::logJITCodeForPerf()) { - StringPrintStream out; - va_list argList; - va_start(argList, format); - out.vprintf(format, argList); - va_end(argList); - PerfLog::log(out.toCString(), result.code().untaggedPtr(), result.size()); + auto dumpSimpleName = [&](StringPrintStream& out, ASCIILiteral simpleName) { + if (simpleName.isNull()) + out.print("unspecified"); + else + out.print(simpleName); + }; + + StringPrintStream out; + out.print(profileName(m_profile), ": "); + switch (m_profile) { + case Profile::Baseline: + case Profile::DFG: + case Profile::FTL: { + if (m_ownerUID) + static_cast(m_ownerUID)->dumpSimpleName(out); + else + dumpSimpleName(out, simpleName); + break; + } +#if ENABLE(WEBASSEMBLY) + case Profile::WasmOMG: + case Profile::WasmBBQ: { + if (m_ownerUID) + out.print(makeString(static_cast(m_ownerUID)->indexOrName())); + else + dumpSimpleName(out, simpleName); + break; } #endif + default: + dumpSimpleName(out, simpleName); + break; + } + PerfLog::log(out.toCString(), codeRef.code().untaggedPtr(), codeRef.size()); +#else + UNUSED_PARAM(codeRef); + UNUSED_PARAM(simpleName); +#endif +} + +LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithDisassemblyImpl(bool dumpDisassembly, ASCIILiteral simpleName, const char* format, ...) +{ + CodeRef result = finalizeCodeWithoutDisassemblyImpl(simpleName); if (!dumpDisassembly && !Options::logJIT()) return result; @@ -214,18 +263,15 @@ class BranchCompactionLinkBuffer { static ALWAYS_INLINE void recordLinkOffsets(AssemblerData& assemblerData, int32_t regionStart, int32_t regionEnd, int32_t offset) { +#if OS(DARWIN) + memset_pattern4(bitwise_cast(assemblerData.buffer()) + regionStart, &offset, regionEnd - regionStart); +#else int32_t ptr = regionStart / sizeof(int32_t); const int32_t end = regionEnd / sizeof(int32_t); int32_t* offsets = reinterpret_cast_ptr(assemblerData.buffer()); while (ptr < end) offsets[ptr++] = offset; -} - -// We use this to prevent compile errors on some platforms that are unhappy -// about the signature of the system's memcpy. -ALWAYS_INLINE void* memcpyWrapper(void* dst, const void* src, size_t bytes) -{ - return memcpy(dst, src, bytes); +#endif } template @@ -236,7 +282,7 @@ void LinkBuffer::copyCompactAndLinkCode(MacroAssembler& macroAssembler, JITCompi if (didFailToAllocate()) return; - Vector& jumpsToLink = macroAssembler.jumpsToLink(); + auto& jumpsToLink = macroAssembler.jumpsToLink(); m_assemblerStorage = macroAssembler.m_assembler.buffer().releaseAssemblerData(); uint8_t* inData = bitwise_cast(m_assemblerStorage.buffer()); #if CPU(ARM64E) @@ -274,11 +320,12 @@ void LinkBuffer::copyCompactAndLinkCode(MacroAssembler& macroAssembler, JITCompi if (m_shouldPerformBranchCompaction) { for (unsigned i = 0; i < jumpCount; ++i) { + auto& linkRecord = jumpsToLink[i]; int offset = readPtr - writePtr; ASSERT(!(offset & 1)); - + // Copy the instructions from the last jump to the current one. 
- size_t regionSize = jumpsToLink[i].from() - readPtr; + size_t regionSize = linkRecord.from() - readPtr; InstructionType* copySource = reinterpret_cast_ptr<InstructionType*>(inData + readPtr); InstructionType* copyEnd = reinterpret_cast_ptr<InstructionType*>(inData + readPtr + regionSize); InstructionType* copyDst = reinterpret_cast_ptr<InstructionType*>(outData + writePtr); @@ -289,38 +336,32 @@ void LinkBuffer::copyCompactAndLinkCode(MacroAssembler& macroAssembler, JITCompi InstructionType insn = read(copySource++); *copyDst++ = insn; } - recordLinkOffsets(m_assemblerStorage, readPtr, jumpsToLink[i].from(), offset); + recordLinkOffsets(m_assemblerStorage, readPtr, linkRecord.from(), offset); readPtr += regionSize; writePtr += regionSize; // Calculate absolute address of the jump target, in the case of backwards // branches we need to be precise, forward branches we are pessimistic const uint8_t* target; -#if CPU(ARM64) - const intptr_t to = jumpsToLink[i].to(&macroAssembler.m_assembler); -#else - const intptr_t to = jumpsToLink[i].to(); -#endif - if (to >= jumpsToLink[i].from()) + const intptr_t to = linkRecord.to(&macroAssembler.m_assembler); + if (linkRecord.isThunk()) + target = bitwise_cast(to); + else if (to >= linkRecord.from()) target = codeOutData + to - offset; // Compensate for what we have collapsed so far else target = codeOutData + to - executableOffsetFor(to); - JumpLinkType jumpLinkType = MacroAssembler::computeJumpType(jumpsToLink[i], codeOutData + writePtr, target); + JumpLinkType jumpLinkType = MacroAssembler::computeJumpType(linkRecord, codeOutData + writePtr, target); // Compact branch if we can... - if (MacroAssembler::canCompact(jumpsToLink[i].type())) { + if (MacroAssembler::canCompact(linkRecord.type())) { // Step back in the write stream - int32_t delta = MacroAssembler::jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType); + int32_t delta = MacroAssembler::jumpSizeDelta(linkRecord.type(), jumpLinkType); if (delta) { writePtr -= delta; - recordLinkOffsets(m_assemblerStorage, jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr); + recordLinkOffsets(m_assemblerStorage, linkRecord.from() - delta, readPtr, readPtr - writePtr); } } -#if CPU(ARM64) - jumpsToLink[i].setFrom(&macroAssembler.m_assembler, writePtr); -#else - jumpsToLink[i].setFrom(writePtr); -#endif + linkRecord.setFrom(&macroAssembler.m_assembler, writePtr); } } else { if (ASSERT_ENABLED) { @@ -349,17 +390,18 @@ void LinkBuffer::copyCompactAndLinkCode(MacroAssembler& macroAssembler, JITCompi recordLinkOffsets(m_assemblerStorage, readPtr, initialSize, readPtr - writePtr); for (unsigned i = 0; i < jumpCount; ++i) { - uint8_t* location = codeOutData + jumpsToLink[i].from(); -#if CPU(ARM64) - const intptr_t to = jumpsToLink[i].to(&macroAssembler.m_assembler); -#else - const intptr_t to = jumpsToLink[i].to(); -#endif - uint8_t* target = codeOutData + to - executableOffsetFor(to); + auto& linkRecord = jumpsToLink[i]; + uint8_t* location = codeOutData + linkRecord.from(); + const intptr_t to = linkRecord.to(&macroAssembler.m_assembler); + uint8_t* target = nullptr; + if (linkRecord.isThunk()) + target = bitwise_cast(to); + else + target = codeOutData + to - executableOffsetFor(to); if (g_jscConfig.useFastJITPermissions) - MacroAssembler::link(jumpsToLink[i], outData + jumpsToLink[i].from(), location, target); + MacroAssembler::link(linkRecord, outData + linkRecord.from(), location, target); else - MacroAssembler::link(jumpsToLink[i], outData + jumpsToLink[i].from(), location, target); + MacroAssembler::link(linkRecord, outData + linkRecord.from(), location, target); } size_t
compactSize = writePtr + initialSize - readPtr; @@ -367,9 +409,9 @@ void LinkBuffer::copyCompactAndLinkCode(MacroAssembler& macroAssembler, JITCompi size_t nopSizeInBytes = initialSize - compactSize; if (g_jscConfig.useFastJITPermissions) - Assembler::fillNops(outData + compactSize, nopSizeInBytes); + Assembler::fillNops(outData + compactSize, nopSizeInBytes); else - Assembler::fillNops(outData + compactSize, nopSizeInBytes); + Assembler::fillNops(outData + compactSize, nopSizeInBytes); } if (g_jscConfig.useFastJITPermissions) @@ -411,6 +453,7 @@ void LinkBuffer::linkCode(MacroAssembler& macroAssembler, JITCompilationEffort e // Ensure that the end of the last invalidation point does not extend beyond the end of the buffer. macroAssembler.padBeforePatch(); +#if ENABLE(JIT) #if !ENABLE(BRANCH_COMPACTION) #if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL macroAssembler.m_assembler.buffer().flushConstantPool(false); @@ -425,14 +468,14 @@ void LinkBuffer::linkCode(MacroAssembler& macroAssembler, JITCompilationEffort e RELEASE_ASSERT(roundUpToMultipleOf(code) == code); #endif performJITMemcpy(code, buffer.data(), buffer.codeSize()); -#if CPU(MIPS) - macroAssembler.m_assembler.relocateJumps(buffer.data(), code); -#endif #elif CPU(ARM_THUMB2) copyCompactAndLinkCode(macroAssembler, effort); #elif CPU(ARM64) copyCompactAndLinkCode(macroAssembler, effort); #endif // !ENABLE(BRANCH_COMPACTION) +#else // ENABLE(JIT) +UNUSED_PARAM(effort); +#endif // ENABLE(JIT) m_linkTasks = WTFMove(macroAssembler.m_linkTasks); m_lateLinkTasks = WTFMove(macroAssembler.m_lateLinkTasks); @@ -581,19 +624,10 @@ void LinkBuffer::dumpProfileStatistics(std::optional outStream) Stat sortedStats[numberOfProfiles]; PrintStream& out = outStream ? *outStream.value() : WTF::dataFile(); -#define RETURN_LINKBUFFER_PROFILE_NAME(name) case Profile::name: return #name; - auto name = [] (Profile profile) -> const char* { - switch (profile) { - FOR_EACH_LINKBUFFER_PROFILE(RETURN_LINKBUFFER_PROFILE_NAME) - } - RELEASE_ASSERT_NOT_REACHED(); - }; -#undef RETURN_LINKBUFFER_PROFILE_NAME - size_t totalOfAllProfilesSize = 0; auto dumpStat = [&] (const Stat& stat) { char formattedName[21]; - snprintf(formattedName, 21, "%20s", name(stat.profile)); + snprintf(formattedName, 21, "%20s", profileName(stat.profile)); const char* largerUnit = nullptr; double sizeInLargerUnit = stat.size; diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/LinkBuffer.h b/vendor/webkit/Source/JavaScriptCore/assembler/LinkBuffer.h index 3aa83146..0c53ae65 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/LinkBuffer.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/LinkBuffer.h @@ -38,7 +38,7 @@ #include "MacroAssembler.h" #include "MacroAssemblerCodeRef.h" #include -#include +#include namespace JSC { @@ -57,7 +57,8 @@ namespace JSC { // * The value referenced by a DataLabel may be set. 
// class LinkBuffer { - WTF_MAKE_NONCOPYABLE(LinkBuffer); WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_NONCOPYABLE(LinkBuffer); + WTF_MAKE_TZONE_ALLOCATED(LinkBuffer); template using CodeRef = MacroAssemblerCodeRef; typedef MacroAssembler::Label Label; @@ -77,7 +78,7 @@ class LinkBuffer { public: #define FOR_EACH_LINKBUFFER_PROFILE(v) \ - v(BaselineJIT) \ + v(Baseline) \ v(DFG) \ v(FTL) \ v(DFGOSREntry) \ @@ -89,10 +90,6 @@ class LinkBuffer { v(LLIntThunk) \ v(DFGThunk) \ v(FTLThunk) \ - v(BoundFunctionThunk) \ - v(RemoteFunctionThunk) \ - v(SpecializedThunk) \ - v(VirtualThunk) \ v(WasmThunk) \ v(ExtraCTIThunk) \ v(WasmOMG) \ @@ -114,24 +111,15 @@ class LinkBuffer { static constexpr unsigned numberOfProfilesExcludingTotal = numberOfProfiles - 1; LinkBuffer(MacroAssembler& macroAssembler, void* ownerUID, Profile profile = Profile::Uncategorized, JITCompilationEffort effort = JITCompilationMustSucceed) - : m_size(0) - , m_didAllocate(false) -#ifndef NDEBUG - , m_completed(false) -#endif + : m_ownerUID(ownerUID) , m_profile(profile) { - UNUSED_PARAM(ownerUID); linkCode(macroAssembler, effort); } template LinkBuffer(MacroAssembler& macroAssembler, CodePtr code, size_t size, Profile profile = Profile::Uncategorized, JITCompilationEffort effort = JITCompilationMustSucceed, bool shouldPerformBranchCompaction = true) : m_size(size) - , m_didAllocate(false) -#ifndef NDEBUG - , m_completed(false) -#endif , m_profile(profile) , m_code(code.template retagged()) { @@ -301,17 +289,17 @@ class LinkBuffer { // displaying disassembly. template - CodeRef finalizeCodeWithoutDisassembly() + CodeRef finalizeCodeWithoutDisassembly(ASCIILiteral simpleName) { - return finalizeCodeWithoutDisassemblyImpl().template retagged(); + return finalizeCodeWithoutDisassemblyImpl(simpleName).template retagged(); } template - CodeRef finalizeCodeWithDisassembly(bool dumpDisassembly, const char* format, Args... args) + CodeRef finalizeCodeWithDisassembly(bool dumpDisassembly, ASCIILiteral simpleName, const char* format, Args... args) { ALLOW_NONLITERAL_FORMAT_BEGIN IGNORE_WARNINGS_BEGIN("format-security") - return finalizeCodeWithDisassemblyImpl(dumpDisassembly, format, args...).template retagged(); + return finalizeCodeWithDisassemblyImpl(dumpDisassembly, simpleName, format, args...).template retagged(); IGNORE_WARNINGS_END ALLOW_NONLITERAL_FORMAT_END } @@ -344,8 +332,8 @@ ALLOW_NONLITERAL_FORMAT_END void setIsThunk() { m_isThunk = true; } private: - JS_EXPORT_PRIVATE CodeRef finalizeCodeWithoutDisassemblyImpl(); - JS_EXPORT_PRIVATE CodeRef finalizeCodeWithDisassemblyImpl(bool dumpDisassembly, const char* format, ...) WTF_ATTRIBUTE_PRINTF(3, 4); + JS_EXPORT_PRIVATE CodeRef finalizeCodeWithoutDisassemblyImpl(ASCIILiteral); + JS_EXPORT_PRIVATE CodeRef finalizeCodeWithDisassemblyImpl(bool dumpDisassembly, ASCIILiteral, const char* format, ...) 
WTF_ATTRIBUTE_PRINTF(4, 5); #if ENABLE(BRANCH_COMPACTION) int executableOffsetFor(int location) @@ -403,8 +391,11 @@ ALLOW_NONLITERAL_FORMAT_END static void dumpCode(void* code, size_t); #endif + void logJITCodeForPerf(CodeRef&, ASCIILiteral); + RefPtr m_executableMemory; - size_t m_size; + size_t m_size { 0 }; + void* m_ownerUID { nullptr }; #if ENABLE(BRANCH_COMPACTION) AssemblerData m_assemblerStorage; #if CPU(ARM64E) @@ -412,9 +403,9 @@ ALLOW_NONLITERAL_FORMAT_END #endif bool m_shouldPerformBranchCompaction { true }; #endif - bool m_didAllocate; + bool m_didAllocate { false }; #ifndef NDEBUG - bool m_completed; + bool m_completed { false }; #endif #if ASSERT_ENABLED bool m_isJumpIsland { false }; @@ -431,13 +422,13 @@ ALLOW_NONLITERAL_FORMAT_END static size_t s_profileCummulativeLinkedCounts[numberOfProfiles]; }; -#define FINALIZE_CODE_IF(condition, linkBufferReference, resultPtrTag, ...) \ - (UNLIKELY((condition) || JSC::Options::logJIT() || JSC::Options::logJITCodeForPerf()) \ - ? (linkBufferReference).finalizeCodeWithDisassembly((condition), __VA_ARGS__) \ - : (linkBufferReference).finalizeCodeWithoutDisassembly()) +#define FINALIZE_CODE_IF(condition, linkBufferReference, resultPtrTag, simpleName, ...) \ + (UNLIKELY((condition) || JSC::Options::logJIT()) \ + ? (linkBufferReference).finalizeCodeWithDisassembly((condition), simpleName, __VA_ARGS__) \ + : (linkBufferReference).finalizeCodeWithoutDisassembly(simpleName)) -#define FINALIZE_CODE_FOR(codeBlock, linkBufferReference, resultPtrTag, ...) \ - FINALIZE_CODE_IF((shouldDumpDisassemblyFor(codeBlock) || Options::asyncDisassembly()), linkBufferReference, resultPtrTag, __VA_ARGS__) +#define FINALIZE_CODE_FOR(codeBlock, linkBufferReference, resultPtrTag, simpleName, ...) \ + FINALIZE_CODE_IF((shouldDumpDisassemblyFor(codeBlock) || Options::asyncDisassembly()), linkBufferReference, resultPtrTag, simpleName, __VA_ARGS__) // Use this to finalize code, like so: // @@ -455,20 +446,20 @@ ALLOW_NONLITERAL_FORMAT_END // Note that the format string and print arguments are only evaluated when dumpDisassembly // is true, so you can hide expensive disassembly-only computations inside there. -#define FINALIZE_CODE(linkBufferReference, resultPtrTag, ...) \ - FINALIZE_CODE_IF((JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly()), linkBufferReference, resultPtrTag, __VA_ARGS__) +#define FINALIZE_CODE(linkBufferReference, resultPtrTag, simpleName, ...) \ + FINALIZE_CODE_IF((JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly()), linkBufferReference, resultPtrTag, simpleName, __VA_ARGS__) #define FINALIZE_DFG_CODE(linkBufferReference, resultPtrTag, ...) 
\ - FINALIZE_CODE_IF((JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly() || Options::dumpDFGDisassembly()), linkBufferReference, resultPtrTag, __VA_ARGS__) + FINALIZE_CODE_IF((JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly() || Options::dumpDFGDisassembly()), linkBufferReference, resultPtrTag, nullptr, __VA_ARGS__) -#define FINALIZE_REGEXP_CODE(linkBufferReference, resultPtrTag, dataLogFArgumentsForHeading) \ - FINALIZE_CODE_IF(JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly() || Options::dumpRegExpDisassembly(), linkBufferReference, resultPtrTag, dataLogFArgumentsForHeading) +#define FINALIZE_REGEXP_CODE(linkBufferReference, resultPtrTag, simpleName, dataLogFArgumentsForHeading) \ + FINALIZE_CODE_IF(JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly() || Options::dumpRegExpDisassembly(), linkBufferReference, resultPtrTag, simpleName, dataLogFArgumentsForHeading) -#define FINALIZE_WASM_CODE(linkBufferReference, resultPtrTag, ...) \ - FINALIZE_CODE_IF((JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly() || Options::dumpWasmDisassembly()), linkBufferReference, resultPtrTag, __VA_ARGS__) +#define FINALIZE_WASM_CODE(linkBufferReference, resultPtrTag, simpleName, ...) \ + FINALIZE_CODE_IF((JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly() || Options::dumpWasmDisassembly()), linkBufferReference, resultPtrTag, simpleName, __VA_ARGS__) -#define FINALIZE_WASM_CODE_FOR_MODE(mode, linkBufferReference, resultPtrTag, ...) \ - FINALIZE_CODE_IF(shouldDumpDisassemblyFor(mode), linkBufferReference, resultPtrTag, __VA_ARGS__) +#define FINALIZE_WASM_CODE_FOR_MODE(mode, linkBufferReference, resultPtrTag, simpleName, ...) \ + FINALIZE_CODE_IF(shouldDumpDisassemblyFor(mode), linkBufferReference, resultPtrTag, simpleName, __VA_ARGS__) diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/MIPSAssembler.h b/vendor/webkit/Source/JavaScriptCore/assembler/MIPSAssembler.h deleted file mode 100644 index f89b1bcb..00000000 --- a/vendor/webkit/Source/JavaScriptCore/assembler/MIPSAssembler.h +++ /dev/null @@ -1,1098 +0,0 @@ -/* - * Copyright (C) 2009-2023 Apple Inc. All rights reserved. - * Copyright (C) 2009 University of Szeged - * All rights reserved. - * Copyright (C) 2010 MIPS Technologies, Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY MIPS TECHNOLOGIES, INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL MIPS TECHNOLOGIES, INC. 
OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#pragma once - -#if ENABLE(ASSEMBLER) && CPU(MIPS) - -#include "AssemblerBuffer.h" -#include "JITCompilationEffort.h" -#include "MIPSRegisters.h" -#include -#include -#include - -namespace JSC { - -typedef uint32_t MIPSWord; - -namespace RegisterNames { -typedef enum : int8_t { -#define REGISTER_ID(id, name, r, cs) id, - FOR_EACH_GP_REGISTER(REGISTER_ID) -#undef REGISTER_ID -#define REGISTER_ALIAS(id, alias) id = alias, - FOR_EACH_REGISTER_ALIAS(REGISTER_ALIAS) -#undef REGISTER_ALIAS - InvalidGPRReg = -1, -} RegisterID; - -typedef enum : int8_t { -#define REGISTER_ID(id, name, idx) id = idx, - FOR_EACH_SP_REGISTER(REGISTER_ID) -#undef REGISTER_ID -} SPRegisterID; - -typedef enum : int8_t { -#define REGISTER_ID(id, name, r, cs) id, - FOR_EACH_FP_REGISTER(REGISTER_ID) -#undef REGISTER_ID - InvalidFPRReg = -1, -} FPRegisterID; - -} // namespace MIPSRegisters - -class MIPSAssembler { -public: - typedef MIPSRegisters::RegisterID RegisterID; - typedef MIPSRegisters::SPRegisterID SPRegisterID; - typedef MIPSRegisters::FPRegisterID FPRegisterID; - typedef SegmentedVector Jumps; - - static constexpr RegisterID firstRegister() { return MIPSRegisters::r0; } - static constexpr RegisterID lastRegister() { return MIPSRegisters::r31; } - static constexpr unsigned numberOfRegisters() { return lastRegister() - firstRegister() + 1; } - - static constexpr SPRegisterID firstSPRegister() { return MIPSRegisters::fir; } - static constexpr SPRegisterID lastSPRegister() { return MIPSRegisters::pc; } - static constexpr unsigned numberOfSPRegisters() { return lastSPRegister() - firstSPRegister() + 1; } - - static constexpr FPRegisterID firstFPRegister() { return MIPSRegisters::f0; } - static constexpr FPRegisterID lastFPRegister() { return MIPSRegisters::f31; } - static constexpr unsigned numberOfFPRegisters() { return lastFPRegister() - firstFPRegister() + 1; } - - static const char* gprName(RegisterID id) - { - ASSERT(id >= firstRegister() && id <= lastRegister()); - static const char* const nameForRegister[numberOfRegisters()] = { -#define REGISTER_NAME(id, name, r, c) name, - FOR_EACH_GP_REGISTER(REGISTER_NAME) -#undef REGISTER_NAME - }; - return nameForRegister[id]; - } - - static const char* sprName(SPRegisterID id) - { - ASSERT(id >= firstSPRegister() && id <= lastSPRegister()); - static const char* const nameForRegister[numberOfSPRegisters()] = { -#define REGISTER_NAME(id, name, idx) name, - FOR_EACH_SP_REGISTER(REGISTER_NAME) -#undef REGISTER_NAME - }; - return nameForRegister[id]; - } - - static const char* fprName(FPRegisterID id) - { - ASSERT(id >= firstFPRegister() && id <= lastFPRegister()); - static const char* const nameForRegister[numberOfFPRegisters()] = { -#define REGISTER_NAME(id, name, r, cs) name, - FOR_EACH_FP_REGISTER(REGISTER_NAME) -#undef REGISTER_NAME - }; - return nameForRegister[id]; - } - - MIPSAssembler() - : m_indexOfLastWatchpoint(INT_MIN) - , m_indexOfTailOfLastWatchpoint(INT_MIN) - { - } - - AssemblerBuffer& buffer() { return m_buffer; } - - // MIPS instruction opcode field 
position - enum { - OP_SH_RD = 11, - OP_SH_RT = 16, - OP_SH_RS = 21, - OP_SH_SHAMT = 6, - OP_SH_CODE = 16, - OP_SH_FD = 6, - OP_SH_FS = 11, - OP_SH_FT = 16, - OP_SH_MSB = 11, - OP_SH_LSB = 6 - }; - - // FCSR Bits - enum { - FP_CAUSE_INVALID_OPERATION = 1 << 16 - }; - - void emitInst(MIPSWord op) - { - void* oldBase = m_buffer.data(); - - m_buffer.putInt(op); - - void* newBase = m_buffer.data(); - if (oldBase != newBase) - relocateJumps(oldBase, newBase); - } - - void nop() - { - emitInst(0x00000000); - } - - using CopyFunction = void*(&)(void*, const void*, size_t); - - template - ALWAYS_INLINE static void fillNops(void* base, size_t size) - { - UNUSED_PARAM(copy); - RELEASE_ASSERT(!(size % sizeof(int32_t))); - - int32_t* ptr = static_cast(base); - const size_t num32s = size / sizeof(int32_t); - const int32_t insn = 0x00000000; - for (size_t i = 0; i < num32s; i++) - *ptr++ = insn; - } - - void sync() - { - // FIXME: https://bugs.webkit.org/show_bug.cgi?id=169984 - // We might get a performance improvements by using SYNC_MB in some or - // all cases. - emitInst(0x0000000f); - } - - /* Need to insert one load data delay nop for mips1. */ - void loadDelayNop() - { -#if WTF_MIPS_ISA(1) - nop(); -#endif - } - - /* Need to insert one coprocessor access delay nop for mips1. */ - void copDelayNop() - { -#if WTF_MIPS_ISA(1) - nop(); -#endif - } - - void move(RegisterID rd, RegisterID rs) - { - /* addu */ - emitInst(0x00000021 | (rd << OP_SH_RD) | (rs << OP_SH_RS)); - } - - /* Set an immediate value to a register. This may generate 1 or 2 - instructions. */ - void li(RegisterID dest, int imm) - { - if (imm >= -32768 && imm <= 32767) - addiu(dest, MIPSRegisters::zero, imm); - else if (imm >= 0 && imm < 65536) - ori(dest, MIPSRegisters::zero, imm); - else { - lui(dest, imm >> 16); - if (imm & 0xffff) - ori(dest, dest, imm); - } - } - - void ext(RegisterID rt, RegisterID rs, int pos, int size) - { - int msb = size - 1; - emitInst(0x7c000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (pos << OP_SH_LSB) | (msb << OP_SH_MSB)); - } - - void mfhc1(RegisterID rt, FPRegisterID fs) - { - emitInst(0x4460000 | (rt << OP_SH_RT) | (fs << OP_SH_FS)); - } - - void lui(RegisterID rt, int imm) - { - emitInst(0x3c000000 | (rt << OP_SH_RT) | (imm & 0xffff)); - } - - void clz(RegisterID rd, RegisterID rs) - { - emitInst(0x70000020 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rd << OP_SH_RT)); - } - - void addiu(RegisterID rt, RegisterID rs, int imm) - { - emitInst(0x24000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff)); - } - - void addu(RegisterID rd, RegisterID rs, RegisterID rt) - { - emitInst(0x00000021 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT)); - } - - void subu(RegisterID rd, RegisterID rs, RegisterID rt) - { - emitInst(0x00000023 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT)); - } - - void mult(RegisterID rs, RegisterID rt) - { - emitInst(0x00000018 | (rs << OP_SH_RS) | (rt << OP_SH_RT)); - } - - void div(RegisterID rs, RegisterID rt) - { - emitInst(0x0000001a | (rs << OP_SH_RS) | (rt << OP_SH_RT)); - } - - void mfhi(RegisterID rd) - { - emitInst(0x00000010 | (rd << OP_SH_RD)); - } - - void mflo(RegisterID rd) - { - emitInst(0x00000012 | (rd << OP_SH_RD)); - } - - void mul(RegisterID rd, RegisterID rs, RegisterID rt) - { -#if WTF_MIPS_ISA_AT_LEAST(32) - emitInst(0x70000002 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT)); -#else - mult(rs, rt); - mflo(rd); -#endif - } - - void andInsn(RegisterID rd, RegisterID rs, RegisterID rt) - { - emitInst(0x00000024 | (rd << OP_SH_RD) 
| (rs << OP_SH_RS) | (rt << OP_SH_RT)); - } - - void andi(RegisterID rt, RegisterID rs, int imm) - { - emitInst(0x30000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff)); - } - - void nor(RegisterID rd, RegisterID rs, RegisterID rt) - { - emitInst(0x00000027 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT)); - } - - void orInsn(RegisterID rd, RegisterID rs, RegisterID rt) - { - emitInst(0x00000025 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT)); - } - - void ori(RegisterID rt, RegisterID rs, int imm) - { - emitInst(0x34000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff)); - } - - void xorInsn(RegisterID rd, RegisterID rs, RegisterID rt) - { - emitInst(0x00000026 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT)); - } - - void xori(RegisterID rt, RegisterID rs, int imm) - { - emitInst(0x38000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff)); - } - - void slt(RegisterID rd, RegisterID rs, RegisterID rt) - { - emitInst(0x0000002a | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT)); - } - - void sltu(RegisterID rd, RegisterID rs, RegisterID rt) - { - emitInst(0x0000002b | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT)); - } - - void slti(RegisterID rt, RegisterID rs, int imm) - { - emitInst(0x28000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff)); - } - - void sltiu(RegisterID rt, RegisterID rs, int imm) - { - emitInst(0x2c000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff)); - } - - void sll(RegisterID rd, RegisterID rt, int shamt) - { - emitInst(0x00000000 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT)); - } - - void sllv(RegisterID rd, RegisterID rt, RegisterID rs) - { - emitInst(0x00000004 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS)); - } - - void sra(RegisterID rd, RegisterID rt, int shamt) - { - emitInst(0x00000003 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT)); - } - - void srav(RegisterID rd, RegisterID rt, RegisterID rs) - { - emitInst(0x00000007 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS)); - } - - void srl(RegisterID rd, RegisterID rt, int shamt) - { - emitInst(0x00000002 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT)); - } - - void srlv(RegisterID rd, RegisterID rt, RegisterID rs) - { - emitInst(0x00000006 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS)); - } - - void lb(RegisterID rt, RegisterID rs, int offset) - { - emitInst(0x80000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff)); - loadDelayNop(); - } - - void lbu(RegisterID rt, RegisterID rs, int offset) - { - emitInst(0x90000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff)); - loadDelayNop(); - } - - void lw(RegisterID rt, RegisterID rs, int offset) - { - emitInst(0x8c000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff)); - loadDelayNop(); - } - - void lwl(RegisterID rt, RegisterID rs, int offset) - { - emitInst(0x88000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff)); - loadDelayNop(); - } - - void lwr(RegisterID rt, RegisterID rs, int offset) - { - emitInst(0x98000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff)); - loadDelayNop(); - } - - void lh(RegisterID rt, RegisterID rs, int offset) - { - emitInst(0x84000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff)); - loadDelayNop(); - } - - void lhu(RegisterID rt, RegisterID rs, int offset) - { - emitInst(0x94000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff)); - loadDelayNop(); - } - - void 
sb(RegisterID rt, RegisterID rs, int offset) - { - emitInst(0xa0000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff)); - } - - void sh(RegisterID rt, RegisterID rs, int offset) - { - emitInst(0xa4000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff)); - } - - void sw(RegisterID rt, RegisterID rs, int offset) - { - emitInst(0xac000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff)); - } - - void jr(RegisterID rs) - { - emitInst(0x00000008 | (rs << OP_SH_RS)); - } - - void jalr(RegisterID rs) - { - emitInst(0x0000f809 | (rs << OP_SH_RS)); - } - - void jal() - { - emitInst(0x0c000000); - } - - void bkpt() - { - int value = 512; /* BRK_BUG */ - emitInst(0x0000000d | ((value & 0x3ff) << OP_SH_CODE)); - } - - static bool isBkpt(void* address) - { - int value = 512; /* BRK_BUG */ - MIPSWord expected = (0x0000000d | ((value & 0x3ff) << OP_SH_CODE)); - MIPSWord candidateInstruction = *reinterpret_cast(address); - return candidateInstruction == expected; - } - - void bgez(RegisterID rs, int imm) - { - emitInst(0x04010000 | (rs << OP_SH_RS) | (imm & 0xffff)); - } - - void bltz(RegisterID rs, int imm) - { - emitInst(0x04000000 | (rs << OP_SH_RS) | (imm & 0xffff)); - } - - void beq(RegisterID rs, RegisterID rt, int imm) - { - emitInst(0x10000000 | (rs << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff)); - } - - void bne(RegisterID rs, RegisterID rt, int imm) - { - emitInst(0x14000000 | (rs << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff)); - } - - void bc1t() - { - emitInst(0x45010000); - } - - void bc1f() - { - emitInst(0x45000000); - } - - void appendJump() - { - m_jumps.append(m_buffer.label()); - } - - void addd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft) - { - emitInst(0x46200000 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT)); - } - - void subd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft) - { - emitInst(0x46200001 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT)); - } - - void muld(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft) - { - emitInst(0x46200002 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT)); - } - - void divd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft) - { - emitInst(0x46200003 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT)); - } - - void lwc1(FPRegisterID ft, RegisterID rs, int offset) - { - emitInst(0xc4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff)); - copDelayNop(); - } - - void ldc1(FPRegisterID ft, RegisterID rs, int offset) - { - emitInst(0xd4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff)); - } - - void swc1(FPRegisterID ft, RegisterID rs, int offset) - { - emitInst(0xe4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff)); - } - - void sdc1(FPRegisterID ft, RegisterID rs, int offset) - { - emitInst(0xf4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff)); - } - - void mtc1(RegisterID rt, FPRegisterID fs) - { - emitInst(0x44800000 | (fs << OP_SH_FS) | (rt << OP_SH_RT)); - copDelayNop(); - } - - void mthc1(RegisterID rt, FPRegisterID fs) - { - emitInst(0x44e00000 | (fs << OP_SH_FS) | (rt << OP_SH_RT)); - copDelayNop(); - } - - void mfc1(RegisterID rt, FPRegisterID fs) - { - emitInst(0x44000000 | (fs << OP_SH_FS) | (rt << OP_SH_RT)); - copDelayNop(); - } - - void sqrtd(FPRegisterID fd, FPRegisterID fs) - { - emitInst(0x46200004 | (fd << OP_SH_FD) | (fs << OP_SH_FS)); - } - - void absd(FPRegisterID fd, FPRegisterID fs) - { - emitInst(0x46200005 | (fd << OP_SH_FD) | (fs << OP_SH_FS)); - } - - void movd(FPRegisterID fd, FPRegisterID 
fs) - { - emitInst(0x46200006 | (fd << OP_SH_FD) | (fs << OP_SH_FS)); - } - - void negd(FPRegisterID fd, FPRegisterID fs) - { - emitInst(0x46200007 | (fd << OP_SH_FD) | (fs << OP_SH_FS)); - } - - void truncwd(FPRegisterID fd, FPRegisterID fs) - { - emitInst(0x4620000d | (fd << OP_SH_FD) | (fs << OP_SH_FS)); - } - - void roundwd(FPRegisterID fd, FPRegisterID fs) - { - emitInst(0x4620000c | (fd << OP_SH_FD) | (fs << OP_SH_FS)); - } - - void cvtdw(FPRegisterID fd, FPRegisterID fs) - { - emitInst(0x46800021 | (fd << OP_SH_FD) | (fs << OP_SH_FS)); - } - - void cvtds(FPRegisterID fd, FPRegisterID fs) - { - emitInst(0x46000021 | (fd << OP_SH_FD) | (fs << OP_SH_FS)); - } - - void cvtwd(FPRegisterID fd, FPRegisterID fs) - { - emitInst(0x46200024 | (fd << OP_SH_FD) | (fs << OP_SH_FS)); - } - - void cvtsd(FPRegisterID fd, FPRegisterID fs) - { - emitInst(0x46200020 | (fd << OP_SH_FD) | (fs << OP_SH_FS)); - } - - void ceqd(FPRegisterID fs, FPRegisterID ft) - { - emitInst(0x46200032 | (fs << OP_SH_FS) | (ft << OP_SH_FT)); - copDelayNop(); - } - - void cngtd(FPRegisterID fs, FPRegisterID ft) - { - emitInst(0x4620003f | (fs << OP_SH_FS) | (ft << OP_SH_FT)); - copDelayNop(); - } - - void cnged(FPRegisterID fs, FPRegisterID ft) - { - emitInst(0x4620003d | (fs << OP_SH_FS) | (ft << OP_SH_FT)); - copDelayNop(); - } - - void cltd(FPRegisterID fs, FPRegisterID ft) - { - emitInst(0x4620003c | (fs << OP_SH_FS) | (ft << OP_SH_FT)); - copDelayNop(); - } - - void cled(FPRegisterID fs, FPRegisterID ft) - { - emitInst(0x4620003e | (fs << OP_SH_FS) | (ft << OP_SH_FT)); - copDelayNop(); - } - - void cueqd(FPRegisterID fs, FPRegisterID ft) - { - emitInst(0x46200033 | (fs << OP_SH_FS) | (ft << OP_SH_FT)); - copDelayNop(); - } - - void coled(FPRegisterID fs, FPRegisterID ft) - { - emitInst(0x46200036 | (fs << OP_SH_FS) | (ft << OP_SH_FT)); - copDelayNop(); - } - - void coltd(FPRegisterID fs, FPRegisterID ft) - { - emitInst(0x46200034 | (fs << OP_SH_FS) | (ft << OP_SH_FT)); - copDelayNop(); - } - - void culed(FPRegisterID fs, FPRegisterID ft) - { - emitInst(0x46200037 | (fs << OP_SH_FS) | (ft << OP_SH_FT)); - copDelayNop(); - } - - void cultd(FPRegisterID fs, FPRegisterID ft) - { - emitInst(0x46200035 | (fs << OP_SH_FS) | (ft << OP_SH_FT)); - copDelayNop(); - } - - void cfc1(RegisterID rt, SPRegisterID fs) - { - emitInst(0x44400000 | (rt << OP_SH_RT) | (fs << OP_SH_FS)); - copDelayNop(); - } - - // General helpers - - AssemblerLabel labelIgnoringWatchpoints() - { - return m_buffer.label(); - } - - AssemblerLabel labelForWatchpoint() - { - AssemblerLabel result = m_buffer.label(); - if (static_cast(result.offset()) != m_indexOfLastWatchpoint) - result = label(); - m_indexOfLastWatchpoint = result.offset(); - m_indexOfTailOfLastWatchpoint = result.offset() + maxJumpReplacementSize(); - return result; - } - - AssemblerLabel label() - { - AssemblerLabel result = m_buffer.label(); - while (UNLIKELY(static_cast(result.offset()) < m_indexOfTailOfLastWatchpoint)) { - nop(); - result = m_buffer.label(); - } - return result; - } - - AssemblerLabel align(int alignment) - { - while (!m_buffer.isAligned(alignment)) - bkpt(); - - return label(); - } - - static void* getRelocatedAddress(void* code, AssemblerLabel label) - { - return reinterpret_cast(reinterpret_cast(code) + label.offset()); - } - - static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b) - { - return b.offset() - a.offset(); - } - - // Assembler admin methods: - - size_t codeSize() const - { - return m_buffer.codeSize(); - } - - unsigned debugOffset() { 
return m_buffer.debugOffset(); } - - // Assembly helpers for moving data between fp and registers. - void vmov(RegisterID rd1, RegisterID rd2, FPRegisterID rn) - { -#if WTF_MIPS_ISA_REV(2) && WTF_MIPS_FP64 - mfc1(rd1, rn); - mfhc1(rd2, rn); -#else - mfc1(rd1, rn); - mfc1(rd2, FPRegisterID(rn + 1)); -#endif - } - - void vmov(FPRegisterID rd, RegisterID rn1, RegisterID rn2) - { -#if WTF_MIPS_ISA_REV(2) && WTF_MIPS_FP64 - mtc1(rn1, rd); - mthc1(rn2, rd); -#else - mtc1(rn1, rd); - mtc1(rn2, FPRegisterID(rd + 1)); -#endif - } - - static unsigned getCallReturnOffset(AssemblerLabel call) - { - // The return address is after a call and a delay slot instruction - return call.offset(); - } - - // Linking & patching: - // - // 'link' and 'patch' methods are for use on unprotected code - such as the code - // within the AssemblerBuffer, and code being patched by the patch buffer. Once - // code has been finalized it is (platform support permitting) within a non- - // writable region of memory; to modify the code in an execute-only execuable - // pool the 'repatch' and 'relink' methods should be used. - - static size_t linkDirectJump(void* code, void* to) - { - MIPSWord* insn = reinterpret_cast(reinterpret_cast(code)); - size_t ops = 0; - int32_t slotAddr = reinterpret_cast(insn) + 4; - int32_t toAddr = reinterpret_cast(to); - - if ((slotAddr & 0xf0000000) != (toAddr & 0xf0000000)) { - // lui - *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((toAddr >> 16) & 0xffff); - ++insn; - // ori - *insn = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (toAddr & 0xffff); - ++insn; - // jr - *insn = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS); - ++insn; - ops = 4 * sizeof(MIPSWord); - } else { - // j - *insn = 0x08000000 | ((toAddr & 0x0fffffff) >> 2); - ++insn; - ops = 2 * sizeof(MIPSWord); - } - // nop - *insn = 0x00000000; - return ops; - } - - void linkJump(AssemblerLabel from, AssemblerLabel to) - { - ASSERT(to.isSet()); - ASSERT(from.isSet()); - MIPSWord* insn = reinterpret_cast(reinterpret_cast(m_buffer.data()) + from.offset()); - MIPSWord* toPos = reinterpret_cast(reinterpret_cast(m_buffer.data()) + to.offset()); - - ASSERT(!(*(insn - 1)) && !(*(insn - 2)) && !(*(insn - 3)) && !(*(insn - 5))); - insn = insn - 6; - linkWithOffset(insn, toPos); - } - - static void linkJump(void* code, AssemblerLabel from, void* to) - { - ASSERT(from.isSet()); - MIPSWord* insn = reinterpret_cast(reinterpret_cast(code) + from.offset()); - - ASSERT(!(*(insn - 1)) && !(*(insn - 2)) && !(*(insn - 3)) && !(*(insn - 5))); - insn = insn - 6; - linkWithOffset(insn, to); - } - - static void linkCall(void* code, AssemblerLabel from, void* to) - { - MIPSWord* insn = reinterpret_cast(reinterpret_cast(code) + from.offset()); - linkCallInternal(insn, to); - } - - static void linkPointer(void* code, AssemblerLabel from, void* to) - { - MIPSWord* insn = reinterpret_cast(reinterpret_cast(code) + from.offset()); - ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui - *insn = (*insn & 0xffff0000) | ((reinterpret_cast(to) >> 16) & 0xffff); - insn++; - ASSERT((*insn & 0xfc000000) == 0x34000000); // ori - *insn = (*insn & 0xffff0000) | (reinterpret_cast(to) & 0xffff); - } - - static void relinkJump(void* from, void* to) - { - MIPSWord* insn = reinterpret_cast(from); - - ASSERT(!(*(insn - 1)) && !(*(insn - 5))); - insn = insn - 6; - int flushSize = linkWithOffset(insn, to); - - cacheFlush(insn, flushSize); - } - - static void relinkCall(void* from, void* to) - { - void* start; - int size = 
linkCallInternal(from, to); - if (size == sizeof(MIPSWord)) - start = reinterpret_cast(reinterpret_cast(from) - 2 * sizeof(MIPSWord)); - else - start = reinterpret_cast(reinterpret_cast(from) - 4 * sizeof(MIPSWord)); - - cacheFlush(start, size); - } - - static void relinkTailCall(void* from, void* to) - { - relinkJump(from, to); - } - - static void repatchPointer(void* from, void* toPtr) - { - int32_t to = reinterpret_cast(toPtr); - MIPSWord* insn = reinterpret_cast(from); - ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui - *insn = (*insn & 0xffff0000) | ((to >> 16) & 0xffff); - insn++; - ASSERT((*insn & 0xfc000000) == 0x34000000); // ori - *insn = (*insn & 0xffff0000) | (to & 0xffff); - cacheFlush(from, 2 * sizeof(MIPSWord)); - } - - static int32_t readInt32(void* from) - { - MIPSWord* insn = reinterpret_cast(from); - ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui - int32_t result = (*insn & 0x0000ffff) << 16; - insn++; - ASSERT((*insn & 0xfc000000) == 0x34000000); // ori - result |= *insn & 0x0000ffff; - return result; - } - - static void* readPointer(void* from) - { - return reinterpret_cast(readInt32(from)); - } - - static void* readCallTarget(void* from) - { - MIPSWord* insn = reinterpret_cast(from); - insn -= 4; - ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui - int32_t result = (*insn & 0x0000ffff) << 16; - insn++; - ASSERT((*insn & 0xfc000000) == 0x34000000); // ori - result |= *insn & 0x0000ffff; - return reinterpret_cast(result); - } - - static void cacheFlush(void* code, size_t size) - { - intptr_t end = reinterpret_cast(code) + size; - __builtin___clear_cache(reinterpret_cast(code), reinterpret_cast(end)); - } - - static ptrdiff_t maxJumpReplacementSize() - { - return sizeof(MIPSWord) * 4; - } - - static constexpr ptrdiff_t patchableJumpSize() - { - return sizeof(MIPSWord) * 8; - } - - static void revertJumpToMove(void* instructionStart, RegisterID rt, int imm) - { - MIPSWord* insn = static_cast(instructionStart); - size_t codeSize = 2 * sizeof(MIPSWord); - - // lui - *insn = 0x3c000000 | (rt << OP_SH_RT) | ((imm >> 16) & 0xffff); - ++insn; - // ori - *insn = 0x34000000 | (rt << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff); - ++insn; - // if jr $t9 - if (*insn == 0x03200008) { - *insn = 0x00000000; - codeSize += sizeof(MIPSWord); - } - cacheFlush(instructionStart, codeSize); - } - - static void replaceWithJump(void* instructionStart, void* to) - { - ASSERT(!(bitwise_cast(instructionStart) & 3)); - ASSERT(!(bitwise_cast(to) & 3)); - size_t ops = linkDirectJump(instructionStart, to); - cacheFlush(instructionStart, ops); - } - - /* Update each jump in the buffer of newBase. 
*/ - void relocateJumps(void* oldBase, void* newBase) - { - // Check each jump - for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) { - int pos = iter->offset(); - MIPSWord* insn = reinterpret_cast(reinterpret_cast(newBase) + pos); - insn = insn + 2; - // Need to make sure we have 5 valid instructions after pos - if ((unsigned)pos >= m_buffer.codeSize() - 5 * sizeof(MIPSWord)) - continue; - - if ((*insn & 0xfc000000) == 0x08000000) { // j - int offset = *insn & 0x03ffffff; - int oldInsnAddress = (int)insn - (int)newBase + (int)oldBase; - int topFourBits = (oldInsnAddress + 4) >> 28; - int oldTargetAddress = (topFourBits << 28) | (offset << 2); - int newTargetAddress = oldTargetAddress - (int)oldBase + (int)newBase; - int newInsnAddress = (int)insn; - if (((newInsnAddress + 4) >> 28) == (newTargetAddress >> 28)) - *insn = 0x08000000 | ((newTargetAddress >> 2) & 0x3ffffff); - else { - /* lui */ - *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff); - /* ori */ - *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff); - /* jr */ - *(insn + 2) = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS); - } - } else if ((*insn & 0xffe00000) == 0x3c000000) { // lui - int high = (*insn & 0xffff) << 16; - int low = *(insn + 1) & 0xffff; - int oldTargetAddress = high | low; - int newTargetAddress = oldTargetAddress - (int)oldBase + (int)newBase; - /* lui */ - *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff); - /* ori */ - *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff); - } - } - } - -private: - static int linkWithOffset(MIPSWord* insn, void* to) - { - ASSERT((*insn & 0xfc000000) == 0x10000000 // beq - || (*insn & 0xfc000000) == 0x14000000 // bne - || (*insn & 0xffff0000) == 0x45010000 // bc1t - || (*insn & 0xffff0000) == 0x45000000); // bc1f - intptr_t diff = (reinterpret_cast(to) - reinterpret_cast(insn) - 4) >> 2; - - if (diff < -32768 || diff > 32767 || *(insn + 2) != 0x10000003) { - /* - Convert the sequence: - beq $2, $3, target - nop - b 1f - nop - nop - nop - 1: - - to the new sequence if possible: - bne $2, $3, 1f - nop - j target - nop - nop - nop - 1: - - OR to the new sequence: - bne $2, $3, 1f - nop - lui $25, target >> 16 - ori $25, $25, target & 0xffff - jr $25 - nop - 1: - - Note: beq/bne/bc1t/bc1f are converted to bne/beq/bc1f/bc1t. 
- */ - - if (*(insn + 2) == 0x10000003) { - if ((*insn & 0xfc000000) == 0x10000000) // beq - *insn = (*insn & 0x03ff0000) | 0x14000005; // bne - else if ((*insn & 0xfc000000) == 0x14000000) // bne - *insn = (*insn & 0x03ff0000) | 0x10000005; // beq - else if ((*insn & 0xffff0000) == 0x45010000) // bc1t - *insn = 0x45000005; // bc1f - else if ((*insn & 0xffff0000) == 0x45000000) // bc1f - *insn = 0x45010005; // bc1t - else - ASSERT(0); - } - - insn = insn + 2; - if ((reinterpret_cast(insn) + 4) >> 28 - == reinterpret_cast(to) >> 28) { - *insn = 0x08000000 | ((reinterpret_cast(to) >> 2) & 0x3ffffff); - *(insn + 1) = 0; - return 4 * sizeof(MIPSWord); - } - - intptr_t newTargetAddress = reinterpret_cast(to); - /* lui */ - *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff); - /* ori */ - *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff); - /* jr */ - *(insn + 2) = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS); - return 5 * sizeof(MIPSWord); - } - - *insn = (*insn & 0xffff0000) | (diff & 0xffff); - return sizeof(MIPSWord); - } - - static int linkCallInternal(void* from, void* to) - { - MIPSWord* insn = reinterpret_cast(from); - insn = insn - 4; - - if ((*(insn + 2) & 0xfc000000) == 0x0c000000) { // jal - if ((reinterpret_cast(from) - 4) >> 28 - == reinterpret_cast(to) >> 28) { - *(insn + 2) = 0x0c000000 | ((reinterpret_cast(to) >> 2) & 0x3ffffff); - return sizeof(MIPSWord); - } - - /* lui $25, (to >> 16) & 0xffff */ - *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((reinterpret_cast(to) >> 16) & 0xffff); - /* ori $25, $25, to & 0xffff */ - *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (reinterpret_cast(to) & 0xffff); - /* jalr $25 */ - *(insn + 2) = 0x0000f809 | (MIPSRegisters::t9 << OP_SH_RS); - return 3 * sizeof(MIPSWord); - } - - ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui - ASSERT((*(insn + 1) & 0xfc000000) == 0x34000000); // ori - - /* lui */ - *insn = (*insn & 0xffff0000) | ((reinterpret_cast(to) >> 16) & 0xffff); - /* ori */ - *(insn + 1) = (*(insn + 1) & 0xffff0000) | (reinterpret_cast(to) & 0xffff); - return 2 * sizeof(MIPSWord); - } - - AssemblerBuffer m_buffer; - Jumps m_jumps; - int m_indexOfLastWatchpoint; - int m_indexOfTailOfLastWatchpoint; -}; - -} // namespace JSC - -#endif // ENABLE(ASSEMBLER) && CPU(MIPS) diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/MIPSRegisters.h b/vendor/webkit/Source/JavaScriptCore/assembler/MIPSRegisters.h deleted file mode 100644 index 6a402c22..00000000 --- a/vendor/webkit/Source/JavaScriptCore/assembler/MIPSRegisters.h +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Copyright (C) 2019 Metrological Group B.V. - * Copyright (C) 2019 Igalia S.L. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#pragma once - -#include - -#if CPU(MIPS) - -#define RegisterNames MIPSRegisters - -#define FOR_EACH_REGISTER(macro) \ - FOR_EACH_GP_REGISTER(macro) \ - FOR_EACH_FP_REGISTER(macro) - -#define FOR_EACH_GP_REGISTER(macro) \ - macro(r0, "zero", 0, 0) \ - macro(r1, "at", 0, 0) \ - macro(r2, "v0", 0, 0) \ - macro(r3, "v1", 0, 0) \ - macro(r4, "a0", 0, 0) \ - macro(r5, "a1", 0, 0) \ - macro(r6, "a2", 0, 0) \ - macro(r7, "a3", 0, 0) \ - macro(r8, "t0", 0, 0) \ - macro(r9, "t1", 0, 0) \ - macro(r10, "t2", 0, 0) \ - macro(r11, "t3", 0, 0) \ - macro(r12, "t4", 0, 0) \ - macro(r13, "t5", 0, 0) \ - macro(r14, "t6", 0, 0) \ - macro(r15, "t7", 0, 0) \ - macro(r16, "s0", 0, 1) \ - macro(r17, "s1", 0, 1) \ - macro(r18, "s2", 0, 0) \ - macro(r19, "s3", 0, 0) \ - macro(r20, "s4", 0, 0) \ - macro(r21, "s5", 0, 0) \ - macro(r22, "s6", 0, 0) \ - macro(r23, "s7", 0, 0) \ - macro(r24, "t8", 0, 0) \ - macro(r25, "t9", 0, 0) \ - macro(r26, "k0", 0, 0) \ - macro(r27, "k1", 0, 0) \ - macro(r28, "gp", 0, 0) \ - macro(r29, "sp", 0, 0) \ - macro(r30, "fp", 0, 0) \ - macro(r31, "ra", 0, 0) - -#define FOR_EACH_REGISTER_ALIAS(macro) \ - macro(zero, r0) \ - macro(at, r1) \ - macro(v0, r2) \ - macro(v1, r3) \ - macro(a0, r4) \ - macro(a1, r5) \ - macro(a2, r6) \ - macro(a3, r7) \ - macro(t0, r8) \ - macro(t1, r9) \ - macro(t2, r10) \ - macro(t3, r11) \ - macro(t4, r12) \ - macro(t5, r13) \ - macro(t6, r14) \ - macro(t7, r15) \ - macro(s0, r16) \ - macro(s1, r17) \ - macro(s2, r18) \ - macro(s3, r19) \ - macro(s4, r20) \ - macro(s5, r21) \ - macro(s6, r22) \ - macro(s7, r23) \ - macro(t8, r24) \ - macro(t9, r25) \ - macro(k0, r26) \ - macro(k1, r27) \ - macro(gp, r28) \ - macro(sp, r29) \ - macro(fp, r30) \ - macro(ra, r31) - -#define FOR_EACH_SP_REGISTER(macro) \ - macro(fir, "fir", 0) \ - macro(fccr, "fccr", 25) \ - macro(fexr, "fexr", 26) \ - macro(fenr, "fenr", 28) \ - macro(fcsr, "fcsr", 31) \ - macro(pc, "pc", 32) - -#define FOR_EACH_FP_REGISTER(macro) \ - macro(f0, "f0", 0, 0) \ - macro(f1, "f1", 0, 0) \ - macro(f2, "f2", 0, 0) \ - macro(f3, "f3", 0, 0) \ - macro(f4, "f4", 0, 0) \ - macro(f5, "f5", 0, 0) \ - macro(f6, "f6", 0, 0) \ - macro(f7, "f7", 0, 0) \ - macro(f8, "f8", 0, 0) \ - macro(f9, "f9", 0, 0) \ - macro(f10, "f10", 0, 0) \ - macro(f11, "f11", 0, 0) \ - macro(f12, "f12", 0, 0) \ - macro(f13, "f13", 0, 0) \ - macro(f14, "f14", 0, 0) \ - macro(f15, "f15", 0, 0) \ - macro(f16, "f16", 0, 0) \ - macro(f17, "f17", 0, 0) \ - macro(f18, "f18", 0, 0) \ - macro(f19, "f19", 0, 0) \ - macro(f20, "f20", 0, 0) \ - macro(f21, "f21", 0, 0) \ - macro(f22, "f22", 0, 0) \ - macro(f23, "f23", 0, 0) \ - macro(f24, "f24", 0, 0) \ - macro(f25, "f25", 0, 0) \ - macro(f26, "f26", 0, 0) \ - macro(f27, "f27", 0, 0) \ - macro(f28, "f28", 0, 0) \ - macro(f29, "f29", 0, 0) \ - macro(f30, "f30", 0, 0) \ - macro(f31, "f31", 
0, 0) - -#endif // CPU(MIPS) diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssembler.cpp b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssembler.cpp index 4cd85936..0dec3010 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssembler.cpp +++ b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssembler.cpp @@ -104,6 +104,9 @@ void printInternal(PrintStream& out, MacroAssembler::RelationalCondition cond) void printInternal(PrintStream& out, MacroAssembler::ResultCondition cond) { switch (cond) { + case MacroAssembler::Carry: + out.print("Carry"); + return; case MacroAssembler::Overflow: out.print("Overflow"); return; diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssembler.h b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssembler.h index 1a70e1c7..dcd680ca 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssembler.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssembler.h @@ -59,7 +59,6 @@ #define TARGET_ASSEMBLER ARMv7Assembler #define TARGET_MACROASSEMBLER MacroAssemblerARMv7 #include "MacroAssemblerARMv7.h" -namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; }; #elif CPU(ARM64E) #define TARGET_ASSEMBLER ARM64EAssembler @@ -71,11 +70,6 @@ namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; }; #define TARGET_MACROASSEMBLER MacroAssemblerARM64 #include "MacroAssemblerARM64.h" -#elif CPU(MIPS) -#define TARGET_ASSEMBLER MIPSAssembler -#define TARGET_MACROASSEMBLER MacroAssemblerMIPS -#include "MacroAssemblerMIPS.h" - #elif CPU(X86_64) #define TARGET_ASSEMBLER X86Assembler #define TARGET_MACROASSEMBLER MacroAssemblerX86_64 @@ -185,11 +179,13 @@ class MacroAssembler : public MacroAssemblerBase { using MacroAssemblerBase::urshift32; using MacroAssemblerBase::xor32; +#if CPU(ARM64) || CPU(X86_64) || CPU(RISCV64) || CPU(ARM_THUMB2) + using MacroAssemblerBase::convertInt32ToDouble; +#endif #if CPU(ARM64) || CPU(X86_64) || CPU(RISCV64) using MacroAssemblerBase::and64; using MacroAssemblerBase::or64; using MacroAssemblerBase::xor64; - using MacroAssemblerBase::convertInt32ToDouble; using MacroAssemblerBase::store64; #endif @@ -550,7 +546,11 @@ class MacroAssembler : public MacroAssemblerBase { // consumes some register in some way. void retVoid() { ret(); } void ret32(RegisterID) { ret(); } +#if CPU(ARM_THUMB2) + void ret64(RegisterID, RegisterID) { ret(); } +#else void ret64(RegisterID) { ret(); } +#endif void retFloat(FPRegisterID) { ret(); } void retDouble(FPRegisterID) { ret(); } @@ -2260,6 +2260,24 @@ class MacroAssembler : public MacroAssemblerBase { void print(Arguments&&... 
args); void print(Printer::PrintRecordList*); + + template + void nearCallThunk(CodeLocationLabel label) + { + nearCall().linkThunk(label, this); + } + + template + void nearTailCallThunk(CodeLocationLabel label) + { + nearTailCall().linkThunk(label, this); + } + + template + void jumpThunk(CodeLocationLabel label) + { + jump().linkThunk(label, this); + } }; } // namespace JSC diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerARM64.cpp b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerARM64.cpp index 3feedfe9..234146e5 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerARM64.cpp +++ b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerARM64.cpp @@ -31,6 +31,7 @@ #include "JSCPtrTag.h" #include "ProbeContext.h" #include +#include #if OS(LINUX) #include @@ -57,6 +58,8 @@ static unsigned long getauxval(unsigned long type) namespace JSC { +WTF_MAKE_TZONE_ALLOCATED_IMPL(MacroAssemblerARM64); + JSC_DECLARE_JIT_OPERATION(ctiMasmProbeTrampoline, void, ()); JSC_ANNOTATE_JIT_OPERATION_PROBE(ctiMasmProbeTrampoline); JSC_DECLARE_JIT_OPERATION(ctiMasmProbeTrampolineSIMD, void, ()); diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h index 1d744c92..3208a8c3 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h @@ -32,6 +32,7 @@ #include "JITOperationValidation.h" #include #include +#include namespace JSC { @@ -39,6 +40,7 @@ using Assembler = TARGET_ASSEMBLER; class Reg; class MacroAssemblerARM64 : public AbstractMacroAssembler { + WTF_MAKE_TZONE_ALLOCATED(MacroAssemblerARM64); public: static constexpr unsigned numGPRs = 32; static constexpr unsigned numFPRs = 32; @@ -87,11 +89,10 @@ class MacroAssemblerARM64 : public AbstractMacroAssembler { Vector& jumpsToLink() { return m_assembler.jumpsToLink(); } static bool canCompact(JumpType jumpType) { return Assembler::canCompact(jumpType); } - static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return Assembler::computeJumpType(jumpType, from, to); } static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return Assembler::computeJumpType(record, from, to); } static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return Assembler::jumpSizeDelta(jumpType, jumpLinkType); } - template + template ALWAYS_INLINE static void link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction, uint8_t* to) { return Assembler::link(record, from, fromInstruction, to); } static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value) @@ -114,6 +115,7 @@ class MacroAssemblerARM64 : public AbstractMacroAssembler { }; enum ResultCondition { + Carry = Assembler::ConditionCS, Overflow = Assembler::ConditionVS, Signed = Assembler::ConditionMI, PositiveOrZero = Assembler::ConditionPL, @@ -904,6 +906,8 @@ class MacroAssemblerARM64 : public AbstractMacroAssembler { void lshift64(RegisterID src, TrustedImm32 imm, RegisterID dest) { + if (UNLIKELY(!imm.m_value)) + return move(src, dest); m_assembler.lsl<64>(dest, src, imm.m_value & 0x3f); } @@ -1229,6 +1233,8 @@ class MacroAssemblerARM64 : public AbstractMacroAssembler { void rotateRight64(RegisterID src, TrustedImm32 imm, RegisterID dest) { + if (UNLIKELY(!imm.m_value)) + return move(src, dest); m_assembler.ror<64>(dest, src, imm.m_value & 63); } @@ -1269,6 +1275,8 
@@ class MacroAssemblerARM64 : public AbstractMacroAssembler { void rshift64(RegisterID src, TrustedImm32 imm, RegisterID dest) { + if (UNLIKELY(!imm.m_value)) + return move(src, dest); m_assembler.asr<64>(dest, src, imm.m_value & 0x3f); } @@ -1431,6 +1439,8 @@ class MacroAssemblerARM64 : public AbstractMacroAssembler { void urshift64(RegisterID src, TrustedImm32 imm, RegisterID dest) { + if (UNLIKELY(!imm.m_value)) + return move(src, dest); m_assembler.lsr<64>(dest, src, imm.m_value & 0x3f); } @@ -2120,6 +2130,23 @@ class MacroAssemblerARM64 : public AbstractMacroAssembler { transfer64(src, dest); } + void transfer32(BaseIndex src, BaseIndex dest) + { + load32(src, getCachedDataTempRegisterIDAndInvalidate()); + store32(getCachedDataTempRegisterIDAndInvalidate(), dest); + } + + void transfer64(BaseIndex src, BaseIndex dest) + { + load64(src, getCachedDataTempRegisterIDAndInvalidate()); + store64(getCachedDataTempRegisterIDAndInvalidate(), dest); + } + + void transferPtr(BaseIndex src, BaseIndex dest) + { + transfer64(src, dest); + } + DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address) { DataLabel32 label(this); @@ -4073,6 +4100,11 @@ class MacroAssemblerARM64 : public AbstractMacroAssembler { return branch32(cond, left, right); } + Jump branch32WithMemory16(RelationalCondition cond, Address left, RegisterID right) + { + MacroAssemblerHelpers::load16OnCondition(*this, cond, left, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch32(cond, memoryTempRegister, right); + } // Arithmetic control flow operations: // @@ -4386,11 +4418,12 @@ class MacroAssemblerARM64 : public AbstractMacroAssembler { ALWAYS_INLINE Call call(RegisterID target, RegisterID callTag) { return UNUSED_PARAM(callTag), call(target, NoPtrTag); } ALWAYS_INLINE Call call(Address address, RegisterID callTag) { return UNUSED_PARAM(callTag), call(address, NoPtrTag); } - ALWAYS_INLINE void callOperation(const CodePtr operation) + template + ALWAYS_INLINE void callOperation(const CodePtr operation) { auto tmp = getCachedDataTempRegisterIDAndInvalidate(); move(TrustedImmPtr(operation.taggedPtr()), tmp); - call(tmp, OperationPtrTag); + call(tmp, tag); } ALWAYS_INLINE Jump jump() @@ -5932,7 +5965,13 @@ class MacroAssemblerARM64 : public AbstractMacroAssembler { { Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation()); } - + + template + static void replaceWithNops(CodeLocationLabel instructionStart, size_t memoryToFillWithNopsInBytes) + { + Assembler::replaceWithNops(instructionStart.dataLocation(), memoryToFillWithNopsInBytes); + } + static ptrdiff_t maxJumpReplacementSize() { return Assembler::maxJumpReplacementSize(); diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerARM64E.h b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerARM64E.h index d38773b5..6925f04b 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerARM64E.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerARM64E.h @@ -245,11 +245,12 @@ class MacroAssemblerARM64E : public MacroAssemblerARM64 { call(dataTempRegister, tag); } - ALWAYS_INLINE void callOperation(const CodePtr operation) + template + ALWAYS_INLINE void callOperation(const CodePtr operation) { auto tmp = getCachedDataTempRegisterIDAndInvalidate(); move(TrustedImmPtr(operation.taggedPtr()), tmp); - call(tmp, OperationPtrTag); + call(tmp, tag); } ALWAYS_INLINE Jump jump() { return MacroAssemblerARM64::jump(); } diff --git 
a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h index 41107610..fb7b97e3 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h @@ -114,11 +114,10 @@ class MacroAssemblerARMv7 : public AbstractMacroAssembler { Vector& jumpsToLink() { return m_assembler.jumpsToLink(); } static bool canCompact(JumpType jumpType) { return ARMv7Assembler::canCompact(jumpType); } - static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(jumpType, from, to); } static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(record, from, to); } static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARMv7Assembler::jumpSizeDelta(jumpType, jumpLinkType); } - template + template ALWAYS_INLINE static void link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction, uint8_t* to) { return ARMv7Assembler::link(record, from, fromInstruction, to); } struct ArmAddress { @@ -166,6 +165,7 @@ class MacroAssemblerARMv7 : public AbstractMacroAssembler { }; enum ResultCondition { + Carry = ARMv7Assembler::ConditionCS, Overflow = ARMv7Assembler::ConditionVS, Signed = ARMv7Assembler::ConditionMI, PositiveOrZero = ARMv7Assembler::ConditionPL, @@ -534,6 +534,22 @@ class MacroAssemblerARMv7 : public AbstractMacroAssembler { rotateRight32(srcDst, imm, srcDst); } + void rotateLeft32(RegisterID src, RegisterID shift, RegisterID dest) + { + RegisterID scratch = getCachedDataTempRegisterIDAndInvalidate(); + m_assembler.ARM_and(scratch, shift, ARMThumbImmediate::makeEncodedImm(0x1f)); + m_assembler.sub(scratch, ARMThumbImmediate::makeUInt12(32), scratch); + m_assembler.ror(dest, src, scratch); + } + + void rotateLeft32(RegisterID src, TrustedImm32 shift, RegisterID dest) + { + RegisterID scratch = getCachedDataTempRegisterIDAndInvalidate(); + move(shift, scratch); + m_assembler.ARM_and(scratch, scratch, ARMThumbImmediate::makeEncodedImm(0x1f)); + m_assembler.sub(scratch, ARMThumbImmediate::makeUInt12(32), scratch); + m_assembler.ror(dest, src, scratch); + } void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest) { RegisterID scratch = getCachedDataTempRegisterIDAndInvalidate(); @@ -592,6 +608,13 @@ class MacroAssemblerARMv7 : public AbstractMacroAssembler { urshift32(dest, imm, dest); } + void addUnsignedRightShift32(RegisterID src1, RegisterID src2, TrustedImm32 amount, RegisterID dest) + { + // dest = src1 + (src2 >> amount) + urshift32(src2, amount, dataTempRegister); + add32(src1, dataTempRegister, dest); + } + void sub32(RegisterID src, RegisterID dest) { m_assembler.sub(dest, dest, src); @@ -1283,6 +1306,28 @@ class MacroAssemblerARMv7 : public AbstractMacroAssembler { storePair32(src1, src2, Address(armAddress.base, armAddress.u.offset)); } + void transfer32(Address src, Address dest) + { + load32(src, dataTempRegister); + store32(dataTempRegister, dest); + } + + void transferPtr(Address src, Address dest) + { + transfer32(src, dest); + } + + void transfer32(BaseIndex src, BaseIndex dest) + { + load32(src, dataTempRegister); + store32(dataTempRegister, dest); + } + + void transferPtr(BaseIndex src, BaseIndex dest) + { + transfer32(src, dest); + } + void storeCond8(RegisterID src, Address addr, RegisterID result) { ASSERT(!addr.offset); @@ 
-1366,6 +1411,34 @@ class MacroAssemblerARMv7 : public AbstractMacroAssembler { return !immediate.isUInt12(); } + // Popcount (could be implemented via VCNT?) + + static bool supportsCountPopulation() { return false; } + + NO_RETURN_DUE_TO_CRASH void countPopulation32(RegisterID, RegisterID) + { + ASSERT(!supportsCountPopulation()); + CRASH(); + } + + NO_RETURN_DUE_TO_CRASH void countPopulation32(RegisterID, RegisterID, FPRegisterID) + { + ASSERT(!supportsCountPopulation()); + CRASH(); + } + + NO_RETURN_DUE_TO_CRASH void countPopulation64(RegisterID, RegisterID) + { + ASSERT(!supportsCountPopulation()); + CRASH(); + } + + NO_RETURN_DUE_TO_CRASH void countPopulation64(RegisterID, RegisterID, FPRegisterID) + { + ASSERT(!supportsCountPopulation()); + CRASH(); + } + // Floating-point operations: static bool supportsFloatingPoint() { return true; } @@ -1702,6 +1775,12 @@ class MacroAssemblerARMv7 : public AbstractMacroAssembler { m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle(), /* toDouble: */ false); } + void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest) + { + move(imm, dataTempRegister); + convertInt32ToDouble(dataTempRegister, dest); + } + void convertInt32ToDouble(RegisterID src, FPRegisterID dest) { m_assembler.vmov(fpTempRegister, src, src); @@ -2153,6 +2232,12 @@ class MacroAssemblerARMv7 : public AbstractMacroAssembler { { ARMv7Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation()); } + + template + static void replaceWithNops(CodeLocationLabel instructionStart, size_t memoryToFillWithNopsInBytes) + { + ARMv7Assembler::replaceWithNops(instructionStart.dataLocation(), memoryToFillWithNopsInBytes); + } static ptrdiff_t maxJumpReplacementSize() { @@ -2340,6 +2425,12 @@ class MacroAssemblerARMv7 : public AbstractMacroAssembler { return branch32(cond, addressTempRegister, right); } + Jump branch32WithMemory16(RelationalCondition cond, Address left, RegisterID right) + { + MacroAssemblerHelpers::load16OnCondition(*this, cond, left, addressTempRegister); + return branch32(cond, addressTempRegister, right); + } + Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right) { load32(left.m_ptr, addressTempRegister); @@ -2655,7 +2746,14 @@ class MacroAssemblerARMv7 : public AbstractMacroAssembler { void breakpoint(uint8_t imm = 0) { - m_assembler.bkpt(imm); + m_assembler.udf(imm); + } + + void setCarry(RegisterID dest) + { + m_assembler.it(ARMv7Assembler::ConditionCS, false); + move(TrustedImm32(1), dest); + move(TrustedImm32(0), dest); } static bool isBreakpoint(void* address) { return ARMv7Assembler::isBkpt(address); } @@ -2709,10 +2807,11 @@ class MacroAssemblerARMv7 : public AbstractMacroAssembler { ALWAYS_INLINE Call call(RegisterID target, RegisterID callTag) { return UNUSED_PARAM(callTag), call(target, NoPtrTag); } ALWAYS_INLINE Call call(Address address, RegisterID callTag) { return UNUSED_PARAM(callTag), call(address, NoPtrTag); } - ALWAYS_INLINE void callOperation(const CodePtr operation) + template + ALWAYS_INLINE void callOperation(const CodePtr operation) { move(TrustedImmPtr(operation.taggedPtr()), addressTempRegister); - call(addressTempRegister, OperationPtrTag); + call(addressTempRegister, tag); } ALWAYS_INLINE void ret() @@ -2810,6 +2909,18 @@ class MacroAssemblerARMv7 : public AbstractMacroAssembler { m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0)); } + void moveConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, 
RegisterID dest) + { + auto passCase = branch32(cond, left, right); + move(elseCase, dest); + auto done = jump(); + + passCase.link(this); + move(thenCase, dest); + + done.link(this); + } + ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dst) { padBeforePatch(); diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.cpp b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.cpp index 6425da0e..d48773ce 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.cpp +++ b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.cpp @@ -55,7 +55,7 @@ CString MacroAssemblerCodeRefBase::disassembly(CodePtr codePt bool shouldDumpDisassemblyFor(CodeBlock* codeBlock) { - if (codeBlock && JITCode::isOptimizingJIT(codeBlock->jitType()) && Options::dumpDFGDisassembly()) + if (codeBlock && JSC::JITCode::isOptimizingJIT(codeBlock->jitType()) && Options::dumpDFGDisassembly()) return true; return Options::dumpDisassembly(); } diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.cpp b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.cpp deleted file mode 100644 index 2eb686fd..00000000 --- a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.cpp +++ /dev/null @@ -1,578 +0,0 @@ -/* - * Copyright (C) 2013-2021 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "config.h" - -#if ENABLE(ASSEMBLER) && CPU(MIPS) -#include "MacroAssembler.h" - -#include "ProbeContext.h" -#include -#include - -namespace JSC { - -JSC_DECLARE_JIT_OPERATION(ctiMasmProbeTrampoline, void, ()); -JSC_ANNOTATE_JIT_OPERATION_PROBE(ctiMasmProbeTrampoline); - -using namespace MIPSRegisters; - -#if COMPILER(GCC_COMPATIBLE) - -// The following are offsets for Probe::State fields accessed -// by the ctiMasmProbeTrampoline stub. 
- -#define PTR_SIZE 4 -#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE) -#define PROBE_ARG_OFFSET (1 * PTR_SIZE) -#define PROBE_INIT_STACK_FUNCTION_OFFSET (2 * PTR_SIZE) -#define PROBE_INIT_STACK_ARG_OFFSET (3 * PTR_SIZE) - -#define PROBE_INSTRUCTIONS_AFTER_CALL 2 - -#define PROBE_FIRST_GPREG_OFFSET (4 * PTR_SIZE) - -#define GPREG_SIZE 4 -#define PROBE_CPU_ZR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (0 * GPREG_SIZE)) -#define PROBE_CPU_AT_OFFSET (PROBE_FIRST_GPREG_OFFSET + (1 * GPREG_SIZE)) -#define PROBE_CPU_V0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (2 * GPREG_SIZE)) -#define PROBE_CPU_V1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (3 * GPREG_SIZE)) -#define PROBE_CPU_A0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (4 * GPREG_SIZE)) -#define PROBE_CPU_A1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (5 * GPREG_SIZE)) -#define PROBE_CPU_A2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (6 * GPREG_SIZE)) -#define PROBE_CPU_A3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (7 * GPREG_SIZE)) -#define PROBE_CPU_T0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (8 * GPREG_SIZE)) -#define PROBE_CPU_T1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (9 * GPREG_SIZE)) -#define PROBE_CPU_T2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (10 * GPREG_SIZE)) -#define PROBE_CPU_T3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (11 * GPREG_SIZE)) -#define PROBE_CPU_T4_OFFSET (PROBE_FIRST_GPREG_OFFSET + (12 * GPREG_SIZE)) -#define PROBE_CPU_T5_OFFSET (PROBE_FIRST_GPREG_OFFSET + (13 * GPREG_SIZE)) -#define PROBE_CPU_T6_OFFSET (PROBE_FIRST_GPREG_OFFSET + (14 * GPREG_SIZE)) -#define PROBE_CPU_T7_OFFSET (PROBE_FIRST_GPREG_OFFSET + (15 * GPREG_SIZE)) -#define PROBE_CPU_S0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (16 * GPREG_SIZE)) -#define PROBE_CPU_S1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (17 * GPREG_SIZE)) -#define PROBE_CPU_S2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (18 * GPREG_SIZE)) -#define PROBE_CPU_S3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (19 * GPREG_SIZE)) -#define PROBE_CPU_S4_OFFSET (PROBE_FIRST_GPREG_OFFSET + (20 * GPREG_SIZE)) -#define PROBE_CPU_S5_OFFSET (PROBE_FIRST_GPREG_OFFSET + (21 * GPREG_SIZE)) -#define PROBE_CPU_S6_OFFSET (PROBE_FIRST_GPREG_OFFSET + (22 * GPREG_SIZE)) -#define PROBE_CPU_S7_OFFSET (PROBE_FIRST_GPREG_OFFSET + (23 * GPREG_SIZE)) -#define PROBE_CPU_T8_OFFSET (PROBE_FIRST_GPREG_OFFSET + (24 * GPREG_SIZE)) -#define PROBE_CPU_T9_OFFSET (PROBE_FIRST_GPREG_OFFSET + (25 * GPREG_SIZE)) -#define PROBE_CPU_K0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (26 * GPREG_SIZE)) -#define PROBE_CPU_K1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (27 * GPREG_SIZE)) -#define PROBE_CPU_GP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (28 * GPREG_SIZE)) -#define PROBE_CPU_SP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (29 * GPREG_SIZE)) -#define PROBE_CPU_FP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (30 * GPREG_SIZE)) -#define PROBE_CPU_RA_OFFSET (PROBE_FIRST_GPREG_OFFSET + (31 * GPREG_SIZE)) - -#define PROBE_FIRST_SPREG_OFFSET (PROBE_FIRST_GPREG_OFFSET + (32 * GPREG_SIZE)) - -#define PROBE_CPU_FIR_OFFSET (PROBE_FIRST_SPREG_OFFSET + (0 * GPREG_SIZE)) -#define PROBE_CPU_FCCR_OFFSET (PROBE_FIRST_SPREG_OFFSET + (25 * GPREG_SIZE)) -#define PROBE_CPU_FEXR_OFFSET (PROBE_FIRST_SPREG_OFFSET + (26 * GPREG_SIZE)) -#define PROBE_CPU_FENR_OFFSET (PROBE_FIRST_SPREG_OFFSET + (28 * GPREG_SIZE)) -#define PROBE_CPU_FCSR_OFFSET (PROBE_FIRST_SPREG_OFFSET + (31 * GPREG_SIZE)) -#define PROBE_CPU_PC_OFFSET (PROBE_FIRST_SPREG_OFFSET + (32 * GPREG_SIZE)) - -#define PROBE_FIRST_FPREG_OFFSET (PROBE_FIRST_SPREG_OFFSET + (34 * GPREG_SIZE)) - -#define FPREG_SIZE 8 -#define PROBE_CPU_F0_OFFSET (PROBE_FIRST_FPREG_OFFSET + (0 * FPREG_SIZE)) -#define PROBE_CPU_F1_OFFSET (PROBE_FIRST_FPREG_OFFSET + (1 
* FPREG_SIZE)) -#define PROBE_CPU_F2_OFFSET (PROBE_FIRST_FPREG_OFFSET + (2 * FPREG_SIZE)) -#define PROBE_CPU_F3_OFFSET (PROBE_FIRST_FPREG_OFFSET + (3 * FPREG_SIZE)) -#define PROBE_CPU_F4_OFFSET (PROBE_FIRST_FPREG_OFFSET + (4 * FPREG_SIZE)) -#define PROBE_CPU_F5_OFFSET (PROBE_FIRST_FPREG_OFFSET + (5 * FPREG_SIZE)) -#define PROBE_CPU_F6_OFFSET (PROBE_FIRST_FPREG_OFFSET + (6 * FPREG_SIZE)) -#define PROBE_CPU_F7_OFFSET (PROBE_FIRST_FPREG_OFFSET + (7 * FPREG_SIZE)) -#define PROBE_CPU_F8_OFFSET (PROBE_FIRST_FPREG_OFFSET + (8 * FPREG_SIZE)) -#define PROBE_CPU_F9_OFFSET (PROBE_FIRST_FPREG_OFFSET + (9 * FPREG_SIZE)) -#define PROBE_CPU_F10_OFFSET (PROBE_FIRST_FPREG_OFFSET + (10 * FPREG_SIZE)) -#define PROBE_CPU_F11_OFFSET (PROBE_FIRST_FPREG_OFFSET + (11 * FPREG_SIZE)) -#define PROBE_CPU_F12_OFFSET (PROBE_FIRST_FPREG_OFFSET + (12 * FPREG_SIZE)) -#define PROBE_CPU_F13_OFFSET (PROBE_FIRST_FPREG_OFFSET + (13 * FPREG_SIZE)) -#define PROBE_CPU_F14_OFFSET (PROBE_FIRST_FPREG_OFFSET + (14 * FPREG_SIZE)) -#define PROBE_CPU_F15_OFFSET (PROBE_FIRST_FPREG_OFFSET + (15 * FPREG_SIZE)) -#define PROBE_CPU_F16_OFFSET (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE)) -#define PROBE_CPU_F17_OFFSET (PROBE_FIRST_FPREG_OFFSET + (17 * FPREG_SIZE)) -#define PROBE_CPU_F18_OFFSET (PROBE_FIRST_FPREG_OFFSET + (18 * FPREG_SIZE)) -#define PROBE_CPU_F19_OFFSET (PROBE_FIRST_FPREG_OFFSET + (19 * FPREG_SIZE)) -#define PROBE_CPU_F20_OFFSET (PROBE_FIRST_FPREG_OFFSET + (20 * FPREG_SIZE)) -#define PROBE_CPU_F21_OFFSET (PROBE_FIRST_FPREG_OFFSET + (21 * FPREG_SIZE)) -#define PROBE_CPU_F22_OFFSET (PROBE_FIRST_FPREG_OFFSET + (22 * FPREG_SIZE)) -#define PROBE_CPU_F23_OFFSET (PROBE_FIRST_FPREG_OFFSET + (23 * FPREG_SIZE)) -#define PROBE_CPU_F24_OFFSET (PROBE_FIRST_FPREG_OFFSET + (24 * FPREG_SIZE)) -#define PROBE_CPU_F25_OFFSET (PROBE_FIRST_FPREG_OFFSET + (25 * FPREG_SIZE)) -#define PROBE_CPU_F26_OFFSET (PROBE_FIRST_FPREG_OFFSET + (26 * FPREG_SIZE)) -#define PROBE_CPU_F27_OFFSET (PROBE_FIRST_FPREG_OFFSET + (27 * FPREG_SIZE)) -#define PROBE_CPU_F28_OFFSET (PROBE_FIRST_FPREG_OFFSET + (28 * FPREG_SIZE)) -#define PROBE_CPU_F29_OFFSET (PROBE_FIRST_FPREG_OFFSET + (29 * FPREG_SIZE)) -#define PROBE_CPU_F30_OFFSET (PROBE_FIRST_FPREG_OFFSET + (30 * FPREG_SIZE)) -#define PROBE_CPU_F31_OFFSET (PROBE_FIRST_FPREG_OFFSET + (31 * FPREG_SIZE)) - -#define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (32 * FPREG_SIZE)) - -#define SAVED_PROBE_RETURN_PC_OFFSET (PROBE_SIZE + (0 * PTR_SIZE)) -#define PROBE_SIZE_PLUS_EXTRAS (PROBE_SIZE + (2 * PTR_SIZE)) -// PROBE_SIZE_PLUS_EXTRAS = PROBE_SIZE + SAVED_PROBE_RETURN_PC + padding - -#define FIR 0 -#define FCCR 25 -#define FEXR 26 -#define FENR 28 -#define FCSR 31 - -// These ASSERTs remind you that if you change the layout of Probe::State, -// you need to change ctiMasmProbeTrampoline offsets above to match. 
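The ASSERTs that follow implement a pattern worth noting even as the file is deleted: every hand-maintained PROBE_* offset constant is cross-checked against the real Probe::State layout with offsetof static_asserts, so a layout change breaks the build instead of silently corrupting the probe trampoline. A minimal, self-contained sketch of the same idea, using hypothetical FooState / FOO_* names in place of Probe::State and the PROBE_* constants (and assuming 32-bit pointers, as this MIPS port did):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical mirror of a struct that hand-written assembly indexes by offset.
    struct FooState {
        uintptr_t probeFunction;
        uintptr_t arg;
        uintptr_t gprs[32];
    };

    // Offset constants duplicated on the assembly side.
    #define FOO_PTR_SIZE sizeof(uintptr_t)
    #define FOO_PROBE_FUNCTION_OFFSET (0 * FOO_PTR_SIZE)
    #define FOO_ARG_OFFSET (1 * FOO_PTR_SIZE)
    #define FOO_FIRST_GPREG_OFFSET (2 * FOO_PTR_SIZE)

    // If the struct layout drifts from the assembly's assumptions, compilation fails.
    static_assert(offsetof(FooState, probeFunction) == FOO_PROBE_FUNCTION_OFFSET);
    static_assert(offsetof(FooState, arg) == FOO_ARG_OFFSET);
    static_assert(offsetof(FooState, gprs) == FOO_FIRST_GPREG_OFFSET);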
-#define PROBE_OFFSETOF(x) offsetof(struct Probe::State, x) -static_assert(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, "Probe::State::probeFunction's offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(arg) == PROBE_ARG_OFFSET, "Probe::State::arg's offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(initializeStackFunction) == PROBE_INIT_STACK_FUNCTION_OFFSET, "Probe::State::initializeStackFunction's offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(initializeStackArg) == PROBE_INIT_STACK_ARG_OFFSET, "Probe::State::initializeStackArg's offset matches ctiMasmProbeTrampoline"); - -static_assert(!(PROBE_CPU_ZR_OFFSET & 0x3), "Probe::State::cpu.gprs[zero]'s offset should be 4 byte aligned"); - -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::zero]) == PROBE_CPU_ZR_OFFSET, "Probe::State::cpu.gprs[zero]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::at]) == PROBE_CPU_AT_OFFSET, "Probe::State::cpu.gprs[at]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::v0]) == PROBE_CPU_V0_OFFSET, "Probe::State::cpu.gprs[v0]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::v1]) == PROBE_CPU_V1_OFFSET, "Probe::State::cpu.gprs[v1]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::a0]) == PROBE_CPU_A0_OFFSET, "Probe::State::cpu.gprs[a0]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::a1]) == PROBE_CPU_A1_OFFSET, "Probe::State::cpu.gprs[a1]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::a2]) == PROBE_CPU_A2_OFFSET, "Probe::State::cpu.gprs[a2]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::a3]) == PROBE_CPU_A3_OFFSET, "Probe::State::cpu.gprs[a3]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::t0]) == PROBE_CPU_T0_OFFSET, "Probe::State::cpu.gprs[t0]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::t1]) == PROBE_CPU_T1_OFFSET, "Probe::State::cpu.gprs[t1]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::t2]) == PROBE_CPU_T2_OFFSET, "Probe::State::cpu.gprs[t2]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::t3]) == PROBE_CPU_T3_OFFSET, "Probe::State::cpu.gprs[t3]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::t4]) == PROBE_CPU_T4_OFFSET, "Probe::State::cpu.gprs[t4]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::t5]) == PROBE_CPU_T5_OFFSET, "Probe::State::cpu.gprs[t5]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::t6]) == PROBE_CPU_T6_OFFSET, "Probe::State::cpu.gprs[t6]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::t7]) == PROBE_CPU_T7_OFFSET, "Probe::State::cpu.gprs[t7]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::s0]) == PROBE_CPU_S0_OFFSET, "Probe::State::cpu.gprs[s0]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::s1]) == PROBE_CPU_S1_OFFSET, "Probe::State::cpu.gprs[s1]'s offset matches ctiMasmProbeTrampoline"); 
-static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::s2]) == PROBE_CPU_S2_OFFSET, "Probe::State::cpu.gprs[s2]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::s3]) == PROBE_CPU_S3_OFFSET, "Probe::State::cpu.gprs[s3]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::s4]) == PROBE_CPU_S4_OFFSET, "Probe::State::cpu.gprs[s4]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::s5]) == PROBE_CPU_S5_OFFSET, "Probe::State::cpu.gprs[s5]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::s6]) == PROBE_CPU_S6_OFFSET, "Probe::State::cpu.gprs[s6]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::s7]) == PROBE_CPU_S7_OFFSET, "Probe::State::cpu.gprs[s7]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::t8]) == PROBE_CPU_T8_OFFSET, "Probe::State::cpu.gprs[t8]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::t9]) == PROBE_CPU_T9_OFFSET, "Probe::State::cpu.gprs[t9]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::k0]) == PROBE_CPU_K0_OFFSET, "Probe::State::cpu.gprs[k0]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::k1]) == PROBE_CPU_K1_OFFSET, "Probe::State::cpu.gprs[k1]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::gp]) == PROBE_CPU_GP_OFFSET, "Probe::State::cpu.gprs[gp]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::sp]) == PROBE_CPU_SP_OFFSET, "Probe::State::cpu.gprs[sp]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::fp]) == PROBE_CPU_FP_OFFSET, "Probe::State::cpu.gprs[fp]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.gprs[MIPSRegisters::ra]) == PROBE_CPU_RA_OFFSET, "Probe::State::cpu.gprs[ra]'s offset matches ctiMasmProbeTrampoline"); - -static_assert(PROBE_OFFSETOF(cpu.sprs[MIPSRegisters::fir]) == PROBE_CPU_FIR_OFFSET, "Probe::State::cpu.sprs[fir]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.sprs[MIPSRegisters::fccr]) == PROBE_CPU_FCCR_OFFSET, "Probe::State::cpu.sprs[fccr]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.sprs[MIPSRegisters::fexr]) == PROBE_CPU_FEXR_OFFSET, "Probe::State::cpu.sprs[fexr]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.sprs[MIPSRegisters::fenr]) == PROBE_CPU_FENR_OFFSET, "Probe::State::cpu.sprs[fenr]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.sprs[MIPSRegisters::fcsr]) == PROBE_CPU_FCSR_OFFSET, "Probe::State::cpu.sprs[fcsr]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.sprs[MIPSRegisters::pc]) == PROBE_CPU_PC_OFFSET, "Probe::State::cpu.sprs[pc]'s offset matches ctiMasmProbeTrampoline"); - -static_assert(!(PROBE_CPU_F0_OFFSET & 0x7), "Probe::State::cpu.fprs[f0]'s offset should be 8 byte aligned"); - -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f0]) == PROBE_CPU_F0_OFFSET, "Probe::State::cpu.fprs[f0]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f1]) == PROBE_CPU_F1_OFFSET, "Probe::State::cpu.fprs[f1]'s offset matches ctiMasmProbeTrampoline"); 
-static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f2]) == PROBE_CPU_F2_OFFSET, "Probe::State::cpu.fprs[f2]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f3]) == PROBE_CPU_F3_OFFSET, "Probe::State::cpu.fprs[f3]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f4]) == PROBE_CPU_F4_OFFSET, "Probe::State::cpu.fprs[f4]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f5]) == PROBE_CPU_F5_OFFSET, "Probe::State::cpu.fprs[f5]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f6]) == PROBE_CPU_F6_OFFSET, "Probe::State::cpu.fprs[f6]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f7]) == PROBE_CPU_F7_OFFSET, "Probe::State::cpu.fprs[f7]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f8]) == PROBE_CPU_F8_OFFSET, "Probe::State::cpu.fprs[f8]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f9]) == PROBE_CPU_F9_OFFSET, "Probe::State::cpu.fprs[f9]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f10]) == PROBE_CPU_F10_OFFSET, "Probe::State::cpu.fprs[f10]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f11]) == PROBE_CPU_F11_OFFSET, "Probe::State::cpu.fprs[f11]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f12]) == PROBE_CPU_F12_OFFSET, "Probe::State::cpu.fprs[f12]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f13]) == PROBE_CPU_F13_OFFSET, "Probe::State::cpu.fprs[f13]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f14]) == PROBE_CPU_F14_OFFSET, "Probe::State::cpu.fprs[f14]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f15]) == PROBE_CPU_F15_OFFSET, "Probe::State::cpu.fprs[f15]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f16]) == PROBE_CPU_F16_OFFSET, "Probe::State::cpu.fprs[f16]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f17]) == PROBE_CPU_F17_OFFSET, "Probe::State::cpu.fprs[f17]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f18]) == PROBE_CPU_F18_OFFSET, "Probe::State::cpu.fprs[f18]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f19]) == PROBE_CPU_F19_OFFSET, "Probe::State::cpu.fprs[f19]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f20]) == PROBE_CPU_F20_OFFSET, "Probe::State::cpu.fprs[f20]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f21]) == PROBE_CPU_F21_OFFSET, "Probe::State::cpu.fprs[f21]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f22]) == PROBE_CPU_F22_OFFSET, "Probe::State::cpu.fprs[f22]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f23]) == PROBE_CPU_F23_OFFSET, "Probe::State::cpu.fprs[f23]'s offset matches ctiMasmProbeTrampoline"); 
-static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f24]) == PROBE_CPU_F24_OFFSET, "Probe::State::cpu.fprs[f24]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f25]) == PROBE_CPU_F25_OFFSET, "Probe::State::cpu.fprs[f25]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f26]) == PROBE_CPU_F26_OFFSET, "Probe::State::cpu.fprs[f26]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f27]) == PROBE_CPU_F27_OFFSET, "Probe::State::cpu.fprs[f27]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f28]) == PROBE_CPU_F28_OFFSET, "Probe::State::cpu.fprs[f28]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f29]) == PROBE_CPU_F29_OFFSET, "Probe::State::cpu.fprs[f29]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f30]) == PROBE_CPU_F30_OFFSET, "Probe::State::cpu.fprs[f30]'s offset matches ctiMasmProbeTrampoline"); -static_assert(PROBE_OFFSETOF(cpu.fprs.fprs[MIPSRegisters::f31]) == PROBE_CPU_F31_OFFSET, "Probe::State::cpu.fprs[f31]'s offset matches ctiMasmProbeTrampoline"); - -static_assert(sizeof(Probe::State) == PROBE_SIZE, "Probe::State's size matches ctiMasmProbeTrampoline"); -#undef PROBE_OFFSETOF - -static_assert(MIPSRegisters::fir == FIR, "FIR matches MIPSRegisters::fir"); -static_assert(MIPSRegisters::fccr == FCCR, "FCCR matches MIPSRegisters::fccr"); -static_assert(MIPSRegisters::fexr == FEXR, "FEXR matches MIPSRegisters::fexr"); -static_assert(MIPSRegisters::fenr == FENR, "FENR matches MIPSRegisters::fenr"); -static_assert(MIPSRegisters::fcsr == FCSR, "FCSR matches MIPSRegisters::fcsr"); - -struct IncomingRecord { - uintptr_t a0; - uintptr_t a1; - uintptr_t a2; - uintptr_t s0; - uintptr_t s1; - uintptr_t ra; -}; - -#define IN_A0_OFFSET (0 * PTR_SIZE) -#define IN_A1_OFFSET (1 * PTR_SIZE) -#define IN_A2_OFFSET (2 * PTR_SIZE) -#define IN_S0_OFFSET (3 * PTR_SIZE) -#define IN_S1_OFFSET (4 * PTR_SIZE) -#define IN_RA_OFFSET (5 * PTR_SIZE) -#define IN_SIZE (6 * PTR_SIZE) - -static_assert(IN_A0_OFFSET == offsetof(IncomingRecord, a0), "IN_A0_OFFSET is incorrect"); -static_assert(IN_A1_OFFSET == offsetof(IncomingRecord, a1), "IN_A1_OFFSET is incorrect"); -static_assert(IN_A2_OFFSET == offsetof(IncomingRecord, a2), "IN_A2_OFFSET is incorrect"); -static_assert(IN_S0_OFFSET == offsetof(IncomingRecord, s0), "IN_S0_OFFSET is incorrect"); -static_assert(IN_S1_OFFSET == offsetof(IncomingRecord, s1), "IN_S1_OFFSET is incorrect"); -static_assert(IN_RA_OFFSET == offsetof(IncomingRecord, ra), "IN_RA_OFFSET is incorrect"); -static_assert(IN_SIZE == sizeof(IncomingRecord), "IN_SIZE is incorrect"); - -struct OutgoingRecord { - uintptr_t fp; - uintptr_t ra; -}; - -#define OUT_FP_OFFSET (0 * PTR_SIZE) -#define OUT_RA_OFFSET (1 * PTR_SIZE) -#define OUT_SIZE (2 * PTR_SIZE) - -static_assert(OUT_FP_OFFSET == offsetof(OutgoingRecord, fp), "OUT_FP_OFFSET is incorrect"); -static_assert(OUT_RA_OFFSET == offsetof(OutgoingRecord, ra), "OUT_RA_OFFSET is incorrect"); -static_assert(OUT_SIZE == sizeof(OutgoingRecord), "OUT_SIZE is incorrect"); - -struct RARestorationRecord { - uintptr_t ra; - uintptr_t padding; -}; - -#define RA_RESTORATION_RA_OFFSET (0 * PTR_SIZE) -#define RA_RESTORATION_SIZE (2 * PTR_SIZE) - -static_assert(RA_RESTORATION_RA_OFFSET == offsetof(RARestorationRecord, ra), "RA_RESTORATION_RA_OFFSET is 
incorrect"); -static_assert(RA_RESTORATION_SIZE == sizeof(RARestorationRecord), "RA_RESTORATION_SIZE is incorrect"); -static_assert(!(sizeof(RARestorationRecord) & 0x7), "RARestorationRecord must be 8-byte aligned"); - -asm ( - ".text" "\n" - ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n" - HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n" - SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n" - ".set push" "\n" - ".set noreorder" "\n" - ".set noat" "\n" - - // MacroAssemblerMIPS::probe() has already generated code to store some values in an - // IncomingProbeRecord. sp points to the IncomingProbeRecord. - // - // Incoming register values: - // a0: probe function - // a1: probe arg - // a2: Probe::executeJSCJITProbe - // s0: scratch, was ctiMasmProbeTrampoline - // s1: scratch - // ra: return address - - "move $s0, $sp" "\n" - "addiu $sp, $sp, -" STRINGIZE_VALUE_OF((PROBE_SIZE_PLUS_EXTRAS + OUT_SIZE)) "\n" // Set the sp to protect the Probe::State from interrupts before we initialize it. - "move $s1, $sp" "\n" - - "sw $a0, " STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "($sp)" "\n" // Store the probe handler function (preloaded into a0) - "sw $a1, " STRINGIZE_VALUE_OF(PROBE_ARG_OFFSET) "($sp)" "\n" // Store the probe handler arg (preloaded into a1) - - "sw $at, " STRINGIZE_VALUE_OF(PROBE_CPU_AT_OFFSET) "($sp)" "\n" - "sw $v0, " STRINGIZE_VALUE_OF(PROBE_CPU_V0_OFFSET) "($sp)" "\n" - "sw $v1, " STRINGIZE_VALUE_OF(PROBE_CPU_V1_OFFSET) "($sp)" "\n" - - "lw $v0, " STRINGIZE_VALUE_OF(IN_A0_OFFSET) "($s0)" "\n" // Load saved a0 - "lw $v1, " STRINGIZE_VALUE_OF(IN_A1_OFFSET) "($s0)" "\n" // Load saved a1 - "lw $at, " STRINGIZE_VALUE_OF(IN_A2_OFFSET) "($s0)" "\n" // Load saved a2 - "sw $v0, " STRINGIZE_VALUE_OF(PROBE_CPU_A0_OFFSET) "($sp)" "\n" // Store saved a0 - "sw $v1, " STRINGIZE_VALUE_OF(PROBE_CPU_A1_OFFSET) "($sp)" "\n" // Store saved a1 - "sw $at, " STRINGIZE_VALUE_OF(PROBE_CPU_A2_OFFSET) "($sp)" "\n" // Store saved a2 - - "sw $a3, " STRINGIZE_VALUE_OF(PROBE_CPU_A3_OFFSET) "($sp)" "\n" - "sw $t0, " STRINGIZE_VALUE_OF(PROBE_CPU_T0_OFFSET) "($sp)" "\n" - "sw $t1, " STRINGIZE_VALUE_OF(PROBE_CPU_T1_OFFSET) "($sp)" "\n" - "sw $t2, " STRINGIZE_VALUE_OF(PROBE_CPU_T2_OFFSET) "($sp)" "\n" - "sw $t3, " STRINGIZE_VALUE_OF(PROBE_CPU_T3_OFFSET) "($sp)" "\n" - "sw $t4, " STRINGIZE_VALUE_OF(PROBE_CPU_T4_OFFSET) "($sp)" "\n" - "sw $t5, " STRINGIZE_VALUE_OF(PROBE_CPU_T5_OFFSET) "($sp)" "\n" - "sw $t6, " STRINGIZE_VALUE_OF(PROBE_CPU_T6_OFFSET) "($sp)" "\n" - "sw $t7, " STRINGIZE_VALUE_OF(PROBE_CPU_T7_OFFSET) "($sp)" "\n" - - "lw $v0, " STRINGIZE_VALUE_OF(IN_S0_OFFSET) "($s0)" "\n" // Load saved s0 - "lw $v1, " STRINGIZE_VALUE_OF(IN_S1_OFFSET) "($s0)" "\n" // Load saved s1 - "sw $v0, " STRINGIZE_VALUE_OF(PROBE_CPU_S0_OFFSET) "($sp)" "\n" // Store saved s0 - "sw $v1, " STRINGIZE_VALUE_OF(PROBE_CPU_S1_OFFSET) "($sp)" "\n" // Store saved s1 - - "sw $s2, " STRINGIZE_VALUE_OF(PROBE_CPU_S2_OFFSET) "($sp)" "\n" - "sw $s3, " STRINGIZE_VALUE_OF(PROBE_CPU_S3_OFFSET) "($sp)" "\n" - "sw $s4, " STRINGIZE_VALUE_OF(PROBE_CPU_S4_OFFSET) "($sp)" "\n" - "sw $s5, " STRINGIZE_VALUE_OF(PROBE_CPU_S5_OFFSET) "($sp)" "\n" - "sw $s6, " STRINGIZE_VALUE_OF(PROBE_CPU_S6_OFFSET) "($sp)" "\n" - "sw $s7, " STRINGIZE_VALUE_OF(PROBE_CPU_S7_OFFSET) "($sp)" "\n" - "sw $t8, " STRINGIZE_VALUE_OF(PROBE_CPU_T8_OFFSET) "($sp)" "\n" - "sw $t9, " STRINGIZE_VALUE_OF(PROBE_CPU_T9_OFFSET) "($sp)" "\n" - "sw $k0, " STRINGIZE_VALUE_OF(PROBE_CPU_K0_OFFSET) "($sp)" "\n" - "sw $k1, " STRINGIZE_VALUE_OF(PROBE_CPU_K1_OFFSET) "($sp)" "\n" - "sw $gp, " 
STRINGIZE_VALUE_OF(PROBE_CPU_GP_OFFSET) "($sp)" "\n" - "sw $fp, " STRINGIZE_VALUE_OF(PROBE_CPU_FP_OFFSET) "($sp)" "\n" - - "lw $v0, " STRINGIZE_VALUE_OF(IN_RA_OFFSET) "($s0)" "\n" // Load saved ra - "addiu $s0, $s0, " STRINGIZE_VALUE_OF(IN_SIZE) "\n" // Compute the sp before the probe - "sw $v0, " STRINGIZE_VALUE_OF(PROBE_CPU_RA_OFFSET) "($sp)" "\n" // Store saved ra - "sw $s0, " STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "($sp)" "\n" // Store original sp computed into s0 - - "sw $ra, " STRINGIZE_VALUE_OF(SAVED_PROBE_RETURN_PC_OFFSET) "($sp)" "\n" // Save a duplicate copy of return pc (in ra) - "addiu $ra, $ra, " STRINGIZE_VALUE_OF(PROBE_INSTRUCTIONS_AFTER_CALL * PTR_SIZE) "\n" // The PC after the probe is at 2 instructions past the return point. - "sw $ra, " STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "($sp)" "\n" - - "cfc1 $t0, $" STRINGIZE_VALUE_OF(FIR) "\n" - "sw $t0, " STRINGIZE_VALUE_OF(PROBE_CPU_FIR_OFFSET) "($sp)" "\n" - "cfc1 $t0, $" STRINGIZE_VALUE_OF(FCCR) "\n" - "sw $t0, " STRINGIZE_VALUE_OF(PROBE_CPU_FCCR_OFFSET) "($sp)" "\n" - "cfc1 $t0, $" STRINGIZE_VALUE_OF(FEXR) "\n" - "sw $t0, " STRINGIZE_VALUE_OF(PROBE_CPU_FEXR_OFFSET) "($sp)" "\n" - "cfc1 $t0, $" STRINGIZE_VALUE_OF(FENR) "\n" - "sw $t0, " STRINGIZE_VALUE_OF(PROBE_CPU_FENR_OFFSET) "($sp)" "\n" - "cfc1 $t0, $" STRINGIZE_VALUE_OF(FCSR) "\n" - "sw $t0, " STRINGIZE_VALUE_OF(PROBE_CPU_FCSR_OFFSET) "($sp)" "\n" - - "sdc1 $f0, " STRINGIZE_VALUE_OF(PROBE_CPU_F0_OFFSET) "($sp)" "\n" - "sdc1 $f2, " STRINGIZE_VALUE_OF(PROBE_CPU_F2_OFFSET) "($sp)" "\n" - "sdc1 $f4, " STRINGIZE_VALUE_OF(PROBE_CPU_F4_OFFSET) "($sp)" "\n" - "sdc1 $f6, " STRINGIZE_VALUE_OF(PROBE_CPU_F6_OFFSET) "($sp)" "\n" - "sdc1 $f8, " STRINGIZE_VALUE_OF(PROBE_CPU_F8_OFFSET) "($sp)" "\n" - "sdc1 $f10, " STRINGIZE_VALUE_OF(PROBE_CPU_F10_OFFSET) "($sp)" "\n" - "sdc1 $f12, " STRINGIZE_VALUE_OF(PROBE_CPU_F12_OFFSET) "($sp)" "\n" - "sdc1 $f14, " STRINGIZE_VALUE_OF(PROBE_CPU_F14_OFFSET) "($sp)" "\n" - "sdc1 $f16, " STRINGIZE_VALUE_OF(PROBE_CPU_F16_OFFSET) "($sp)" "\n" - "sdc1 $f18, " STRINGIZE_VALUE_OF(PROBE_CPU_F18_OFFSET) "($sp)" "\n" - "sdc1 $f20, " STRINGIZE_VALUE_OF(PROBE_CPU_F20_OFFSET) "($sp)" "\n" - "sdc1 $f22, " STRINGIZE_VALUE_OF(PROBE_CPU_F22_OFFSET) "($sp)" "\n" - "sdc1 $f24, " STRINGIZE_VALUE_OF(PROBE_CPU_F24_OFFSET) "($sp)" "\n" - "sdc1 $f26, " STRINGIZE_VALUE_OF(PROBE_CPU_F26_OFFSET) "($sp)" "\n" - "sdc1 $f28, " STRINGIZE_VALUE_OF(PROBE_CPU_F28_OFFSET) "($sp)" "\n" - "sdc1 $f30, " STRINGIZE_VALUE_OF(PROBE_CPU_F30_OFFSET) "($sp)" "\n" - - "move $a0, $sp" "\n" // Set the Probe::State* arg. - "addiu $sp, $sp, -16" "\n" // Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments. - "move $t9, $a2" "\n" // Probe::executeJSCJITProbe() - "jalr $t9" "\n" // Call the probe handler. - "nop" "\n" - - // Make sure the Probe::State is entirely below the result stack pointer so - // that register values are still preserved when we call the initializeStack - // function. - "lw $t0, " STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "($s1)" "\n" // Result sp. - "addiu $t1, $s1, " STRINGIZE_VALUE_OF((PROBE_SIZE_PLUS_EXTRAS + OUT_SIZE)) "\n" // End of Probe::State + buffer. - "sltu $t2, $t0, $t1" "\n" - "beqz $t2, " LOCAL_LABEL_STRING(ctiMasmProbeTrampolineProbeStateIsSafe) "\n" - "nop" "\n" - - // Allocate a safe place on the stack below the result stack pointer to stash the Probe::State. - "addiu $sp, $t0, -" STRINGIZE_VALUE_OF((PROBE_SIZE_PLUS_EXTRAS + OUT_SIZE)) "\n" // Set the new sp to protect that memory from interrupts before we copy the Probe::State. 
- - // Copy the Probe::State to the safe place. - // Note: we have to copy from low address to higher address because we're moving the - // Probe::State to a lower address. - "move $t0, $s1" "\n" - "move $t1, $sp" "\n" - "addiu $t2, $s1, " STRINGIZE_VALUE_OF(PROBE_SIZE_PLUS_EXTRAS) "\n" - - LOCAL_LABEL_STRING(ctiMasmProbeTrampolineCopyLoop) ":" "\n" - "lw $t3, 0($t0)" "\n" - "lw $t4, 4($t0)" "\n" - "sw $t3, 0($t1)" "\n" - "sw $t4, 4($t1)" "\n" - "addiu $t0, $t0, 8" "\n" - "addiu $t1, $t1, 8" "\n" - "bne $t0, $t2, " LOCAL_LABEL_STRING(ctiMasmProbeTrampolineCopyLoop) "\n" - "nop" "\n" - - "move $s1, $sp" "\n" - - // Call initializeStackFunction if present. - LOCAL_LABEL_STRING(ctiMasmProbeTrampolineProbeStateIsSafe) ":" "\n" - "lw $t9, " STRINGIZE_VALUE_OF(PROBE_INIT_STACK_FUNCTION_OFFSET) "($s1)" "\n" - "beqz $t9, " LOCAL_LABEL_STRING(ctiMasmProbeTrampolineRestoreRegisters) "\n" - "nop" "\n" - - "move $a0, $s1" "\n" // Set the Probe::State* arg. - "jalr $t9" "\n" // Call the initializeStackFunction (loaded into t9 above). - "nop" "\n" - - LOCAL_LABEL_STRING(ctiMasmProbeTrampolineRestoreRegisters) ":" "\n" - - "move $sp, $s1" "\n" - - // To enable probes to modify register state, we copy all registers - // out of the Probe::State before returning, except for zero, k0 and k1. - - "lw $t0, " STRINGIZE_VALUE_OF(PROBE_CPU_FIR_OFFSET) "($sp)" "\n" - "ctc1 $t0, $" STRINGIZE_VALUE_OF(FIR) "\n" - "lw $t0, " STRINGIZE_VALUE_OF(PROBE_CPU_FCCR_OFFSET) "($sp)" "\n" - "ctc1 $t0, $" STRINGIZE_VALUE_OF(FCCR) "\n" - "lw $t0, " STRINGIZE_VALUE_OF(PROBE_CPU_FEXR_OFFSET) "($sp)" "\n" - "ctc1 $t0, $" STRINGIZE_VALUE_OF(FEXR) "\n" - "lw $t0, " STRINGIZE_VALUE_OF(PROBE_CPU_FENR_OFFSET) "($sp)" "\n" - "ctc1 $t0, $" STRINGIZE_VALUE_OF(FENR) "\n" - "lw $t0, " STRINGIZE_VALUE_OF(PROBE_CPU_FCSR_OFFSET) "($sp)" "\n" - "ctc1 $t0, $" STRINGIZE_VALUE_OF(FCSR) "\n" - - "ldc1 $f0, " STRINGIZE_VALUE_OF(PROBE_CPU_F0_OFFSET) "($sp)" "\n" - "ldc1 $f2, " STRINGIZE_VALUE_OF(PROBE_CPU_F2_OFFSET) "($sp)" "\n" - "ldc1 $f4, " STRINGIZE_VALUE_OF(PROBE_CPU_F4_OFFSET) "($sp)" "\n" - "ldc1 $f6, " STRINGIZE_VALUE_OF(PROBE_CPU_F6_OFFSET) "($sp)" "\n" - "ldc1 $f8, " STRINGIZE_VALUE_OF(PROBE_CPU_F8_OFFSET) "($sp)" "\n" - "ldc1 $f10, " STRINGIZE_VALUE_OF(PROBE_CPU_F10_OFFSET) "($sp)" "\n" - "ldc1 $f12, " STRINGIZE_VALUE_OF(PROBE_CPU_F12_OFFSET) "($sp)" "\n" - "ldc1 $f14, " STRINGIZE_VALUE_OF(PROBE_CPU_F14_OFFSET) "($sp)" "\n" - "ldc1 $f16, " STRINGIZE_VALUE_OF(PROBE_CPU_F16_OFFSET) "($sp)" "\n" - "ldc1 $f18, " STRINGIZE_VALUE_OF(PROBE_CPU_F18_OFFSET) "($sp)" "\n" - "ldc1 $f20, " STRINGIZE_VALUE_OF(PROBE_CPU_F20_OFFSET) "($sp)" "\n" - "ldc1 $f22, " STRINGIZE_VALUE_OF(PROBE_CPU_F22_OFFSET) "($sp)" "\n" - "ldc1 $f24, " STRINGIZE_VALUE_OF(PROBE_CPU_F24_OFFSET) "($sp)" "\n" - "ldc1 $f26, " STRINGIZE_VALUE_OF(PROBE_CPU_F26_OFFSET) "($sp)" "\n" - "ldc1 $f28, " STRINGIZE_VALUE_OF(PROBE_CPU_F28_OFFSET) "($sp)" "\n" - "ldc1 $f30, " STRINGIZE_VALUE_OF(PROBE_CPU_F30_OFFSET) "($sp)" "\n" - - "lw $at, " STRINGIZE_VALUE_OF(PROBE_CPU_AT_OFFSET) "($sp)" "\n" - "lw $v0, " STRINGIZE_VALUE_OF(PROBE_CPU_V0_OFFSET) "($sp)" "\n" - "lw $v1, " STRINGIZE_VALUE_OF(PROBE_CPU_V1_OFFSET) "($sp)" "\n" - "lw $a0, " STRINGIZE_VALUE_OF(PROBE_CPU_A0_OFFSET) "($sp)" "\n" - "lw $a1, " STRINGIZE_VALUE_OF(PROBE_CPU_A1_OFFSET) "($sp)" "\n" - "lw $a2, " STRINGIZE_VALUE_OF(PROBE_CPU_A2_OFFSET) "($sp)" "\n" - "lw $a3, " STRINGIZE_VALUE_OF(PROBE_CPU_A3_OFFSET) "($sp)" "\n" - "lw $t2, " STRINGIZE_VALUE_OF(PROBE_CPU_T2_OFFSET) "($sp)" "\n" - "lw $t3, " 
STRINGIZE_VALUE_OF(PROBE_CPU_T3_OFFSET) "($sp)" "\n" - "lw $t4, " STRINGIZE_VALUE_OF(PROBE_CPU_T4_OFFSET) "($sp)" "\n" - "lw $t5, " STRINGIZE_VALUE_OF(PROBE_CPU_T5_OFFSET) "($sp)" "\n" - "lw $t6, " STRINGIZE_VALUE_OF(PROBE_CPU_T6_OFFSET) "($sp)" "\n" - "lw $t7, " STRINGIZE_VALUE_OF(PROBE_CPU_T7_OFFSET) "($sp)" "\n" - "lw $s0, " STRINGIZE_VALUE_OF(PROBE_CPU_S0_OFFSET) "($sp)" "\n" - "lw $s1, " STRINGIZE_VALUE_OF(PROBE_CPU_S1_OFFSET) "($sp)" "\n" - "lw $s2, " STRINGIZE_VALUE_OF(PROBE_CPU_S2_OFFSET) "($sp)" "\n" - "lw $s3, " STRINGIZE_VALUE_OF(PROBE_CPU_S3_OFFSET) "($sp)" "\n" - "lw $s4, " STRINGIZE_VALUE_OF(PROBE_CPU_S4_OFFSET) "($sp)" "\n" - "lw $s5, " STRINGIZE_VALUE_OF(PROBE_CPU_S5_OFFSET) "($sp)" "\n" - "lw $s6, " STRINGIZE_VALUE_OF(PROBE_CPU_S6_OFFSET) "($sp)" "\n" - "lw $s7, " STRINGIZE_VALUE_OF(PROBE_CPU_S7_OFFSET) "($sp)" "\n" - "lw $t8, " STRINGIZE_VALUE_OF(PROBE_CPU_T8_OFFSET) "($sp)" "\n" - "lw $t9, " STRINGIZE_VALUE_OF(PROBE_CPU_T9_OFFSET) "($sp)" "\n" - "lw $gp, " STRINGIZE_VALUE_OF(PROBE_CPU_GP_OFFSET) "($sp)" "\n" - - // Remaining registers to restore are: t0, t1, fp, ra, sp, and pc. - - // The only way to set the pc on MIPS (from user space) is via an indirect branch - // which means we'll need a free register to do so. For our purposes, ra - // happens to be available in applications of the probe where we may want to - // continue executing at a different location (i.e. change the pc) after the probe - // returns. So, the MIPS probe implementation will allow the probe handler to - // either modify ra or pc, but not both in the same probe invocation. The probe - // mechanism ensures that we never try to modify both ra and pc with a RELEASE_ASSERT - // in Probe::executeJSCJITProbe(). - - // Determine if the probe handler changed the pc. - "lw $ra, " STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "($sp)" "\n" // preload the target sp. - "lw $t0, " STRINGIZE_VALUE_OF(SAVED_PROBE_RETURN_PC_OFFSET) "($sp)" "\n" - "lw $t1, " STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "($sp)" "\n" - "addiu $t0, $t0, " STRINGIZE_VALUE_OF(PROBE_INSTRUCTIONS_AFTER_CALL * PTR_SIZE) "\n" - "bne $t0, $t1, " LOCAL_LABEL_STRING(ctiMasmProbeTrampolineEnd) "\n" - "nop" "\n" - - // We didn't change the PC. So, let's prepare for setting a potentially new ra value. - - // 1. Make room for the RARestorationRecord. The probe site will pop this off later. - "addiu $ra, $ra, -" STRINGIZE_VALUE_OF(RA_RESTORATION_SIZE) "\n" - // 2. Store the lp value to restore at the probe return site. - "lw $t0, " STRINGIZE_VALUE_OF(PROBE_CPU_RA_OFFSET) "($sp)" "\n" - "sw $t0, " STRINGIZE_VALUE_OF(RA_RESTORATION_RA_OFFSET) "($ra)" "\n" - // 3. Force the return ramp to return to the probe return site. - "lw $t0, " STRINGIZE_VALUE_OF(SAVED_PROBE_RETURN_PC_OFFSET) "($sp)" "\n" - "sw $t0, " STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "($sp)" "\n" - - LOCAL_LABEL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n" - - // Fill in the OutgoingProbeRecord. - "addiu $ra, $ra, -" STRINGIZE_VALUE_OF(OUT_SIZE) "\n" - - "lw $t0, " STRINGIZE_VALUE_OF(PROBE_CPU_FP_OFFSET) "($sp)" "\n" - "lw $t1, " STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "($sp)" "\n" // Set up the outgoing record so that we'll jump to the new PC. - "sw $t0, " STRINGIZE_VALUE_OF(OUT_FP_OFFSET) "($ra)" "\n" - "sw $t1, " STRINGIZE_VALUE_OF(OUT_RA_OFFSET) "($ra)" "\n" - "lw $t0, " STRINGIZE_VALUE_OF(PROBE_CPU_T0_OFFSET) "($sp)" "\n" - "lw $t1, " STRINGIZE_VALUE_OF(PROBE_CPU_T1_OFFSET) "($sp)" "\n" - "move $sp, $ra" "\n" - - // Restore the remaining registers. 
- "lw $fp, " STRINGIZE_VALUE_OF(OUT_FP_OFFSET) "($sp)" "\n" - "lw $ra, " STRINGIZE_VALUE_OF(OUT_RA_OFFSET) "($sp)" "\n" - "addiu $sp, $sp, " STRINGIZE_VALUE_OF(OUT_SIZE) "\n" - "jr $ra" "\n" - "nop" "\n" - ".set pop" "\n" - ".previous" "\n" -); -#endif // COMPILER(GCC_COMPATIBLE) - -void MacroAssembler::probe(Probe::Function function, void* arg, SavedFPWidth) -{ - sub32(TrustedImm32(sizeof(IncomingRecord)), sp); - store32(a0, Address(sp, offsetof(IncomingRecord, a0))); - store32(a1, Address(sp, offsetof(IncomingRecord, a1))); - store32(a2, Address(sp, offsetof(IncomingRecord, a2))); - store32(s0, Address(sp, offsetof(IncomingRecord, s0))); - store32(s1, Address(sp, offsetof(IncomingRecord, s1))); - store32(ra, Address(sp, offsetof(IncomingRecord, ra))); - move(TrustedImmPtr(reinterpret_cast(function)), a0); - move(TrustedImmPtr(arg), a1); - move(TrustedImmPtr(reinterpret_cast(Probe::executeJSCJITProbe)), a2); - move(TrustedImmPtr(reinterpret_cast(ctiMasmProbeTrampoline)), s0); - m_assembler.jalr(s0); - m_assembler.nop(); - // If you change the following instructions, be sure to update PROBE_INSTRUCTIONS_AFTER_CALL as well - load32(Address(sp, offsetof(RARestorationRecord, ra)), ra); - add32(TrustedImm32(sizeof(RARestorationRecord)), sp); -} - -} // namespace JSC - -#endif // ENABLE(ASSEMBLER) && CPU(MIPS) diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h deleted file mode 100644 index 1e325505..00000000 --- a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h +++ /dev/null @@ -1,3818 +0,0 @@ -/* - * Copyright (C) 2008-2022 Apple Inc. All rights reserved. - * Copyright (C) 2010 MIPS Technologies, Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY MIPS TECHNOLOGIES, INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL MIPS TECHNOLOGIES, INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#pragma once - -#if ENABLE(ASSEMBLER) && CPU(MIPS) - -#include "MIPSAssembler.h" -#include "AbstractMacroAssembler.h" - -namespace JSC { - -using Assembler = TARGET_ASSEMBLER; - -class MacroAssemblerMIPS : public AbstractMacroAssembler { -public: - typedef MIPSRegisters::FPRegisterID FPRegisterID; - static constexpr unsigned numGPRs = 32; - static constexpr unsigned numFPRs = 32; - - static constexpr size_t nearJumpRange = 2 * GB; - - MacroAssemblerMIPS() - : m_fixedWidth(false) - { - } - - static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value) - { - return value >= -2147483647 - 1 && value <= 2147483647; - } - - inline bool isPowerOf2(int32_t v) - { - return hasOneBitSet(v); - } - - inline int bitPosition(int32_t v) - { - return getLSBSet(v); - } - - // For storing immediate number - static constexpr RegisterID immTempRegister = MIPSRegisters::t0; - // For storing data loaded from the memory - static constexpr RegisterID dataTempRegister = MIPSRegisters::t1; - // For storing address base - static constexpr RegisterID addrTempRegister = MIPSRegisters::t7; - // For storing compare result - static constexpr RegisterID cmpTempRegister = MIPSRegisters::t8; - - // FP temp register - static constexpr FPRegisterID fpTempRegister = MIPSRegisters::f16; - - RegisterID scratchRegister() { return dataTempRegister; } - - static constexpr int MaximumCompactPtrAlignedAddressOffset = 0x7FFFFFFF; - - enum RelationalCondition { - Equal, - NotEqual, - Above, - AboveOrEqual, - Below, - BelowOrEqual, - GreaterThan, - GreaterThanOrEqual, - LessThan, - LessThanOrEqual - }; - - enum ResultCondition { - Overflow, - Signed, - PositiveOrZero, - Zero, - NonZero - }; - - enum DoubleCondition { - DoubleEqualAndOrdered, - DoubleNotEqualAndOrdered, - DoubleGreaterThanAndOrdered, - DoubleGreaterThanOrEqualAndOrdered, - DoubleLessThanAndOrdered, - DoubleLessThanOrEqualAndOrdered, - DoubleEqualOrUnordered, - DoubleNotEqualOrUnordered, - DoubleGreaterThanOrUnordered, - DoubleGreaterThanOrEqualOrUnordered, - DoubleLessThanOrUnordered, - DoubleLessThanOrEqualOrUnordered - }; - - enum class LoadAddressMode { - ScaleAndAddOffsetIfOffsetIsOutOfBounds, - Scale - }; - - static constexpr RegisterID stackPointerRegister = MIPSRegisters::sp; - static constexpr RegisterID framePointerRegister = MIPSRegisters::fp; - static constexpr RegisterID returnAddressRegister = MIPSRegisters::ra; - - // Integer arithmetic operations: - // - // Operations are typically two operand - operation(source, srcDst) - // For many operations the source may be an TrustedImm32, the srcDst operand - // may often be a memory location (explictly described using an Address - // object). 
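A recurring decision in the helpers below is whether an immediate fits the 16-bit sign-extended immediate field of MIPS I-type instructions (addiu, lw, sw): if it does, and the assembler is not in fixed-width mode (m_fixedWidth forces the long form so that patchable sequences keep a constant size), a single instruction suffices; otherwise the value is first materialized into a temp register with li (in general lui + ori) and a register-register form is used. A minimal sketch of the two guards involved, assuming 32-bit immediates as in the original:

    #include <cstdint>

    // addiu/lw/sw sign-extend their 16-bit immediate.
    static bool fitsSimm16(int32_t value)
    {
        return value >= -32768 && value <= 32767;
    }

    // andi/ori/xori zero-extend theirs, hence the different guard
    // used by and32/or32 in this file.
    static bool fitsUimm16(int32_t value)
    {
        return value >= 0 && value <= 65535;
    }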
- - void add32(RegisterID src, RegisterID dest) - { - m_assembler.addu(dest, dest, src); - } - - void add32(RegisterID op1, RegisterID op2, RegisterID dest) - { - m_assembler.addu(dest, op1, op2); - } - - void add32(TrustedImm32 imm, RegisterID dest) - { - add32(imm, dest, dest); - } - - void add32(TrustedImm32 imm, RegisterID src, RegisterID dest) - { - if (imm.m_value >= -32768 && imm.m_value <= 32767 - && !m_fixedWidth) { - /* - addiu dest, src, imm - */ - m_assembler.addiu(dest, src, imm.m_value); - } else { - /* - li immTemp, imm - addu dest, src, immTemp - */ - move(imm, immTempRegister); - m_assembler.addu(dest, src, immTempRegister); - } - } - - void add32(RegisterID src, TrustedImm32 imm, RegisterID dest) - { - add32(imm, src, dest); - } - - void add32(TrustedImm32 imm, Address address) - { - if (address.offset >= -32768 && address.offset <= 32767 - && !m_fixedWidth) { - /* - lw dataTemp, offset(base) - li immTemp, imm - addu dataTemp, dataTemp, immTemp - sw dataTemp, offset(base) - */ - m_assembler.lw(dataTempRegister, address.base, address.offset); - if (imm.m_value >= -32768 && imm.m_value <= 32767 - && !m_fixedWidth) - m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value); - else { - move(imm, immTempRegister); - m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister); - } - m_assembler.sw(dataTempRegister, address.base, address.offset); - } else { - /* - lui addrTemp, (offset + 0x8000) >> 16 - addu addrTemp, addrTemp, base - lw dataTemp, (offset & 0xffff)(addrTemp) - li immtemp, imm - addu dataTemp, dataTemp, immTemp - sw dataTemp, (offset & 0xffff)(addrTemp) - */ - m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lw(dataTempRegister, addrTempRegister, address.offset); - - if (imm.m_value >= -32768 && imm.m_value <= 32767 && !m_fixedWidth) - m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value); - else { - move(imm, immTempRegister); - m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister); - } - m_assembler.sw(dataTempRegister, addrTempRegister, address.offset); - } - } - - void add32(Address src, RegisterID dest) - { - load32(src, dataTempRegister); - add32(dataTempRegister, dest); - } - - void add32(AbsoluteAddress src, RegisterID dest) - { - load32(src.m_ptr, dataTempRegister); - add32(dataTempRegister, dest); - } - - void add32(RegisterID src, Address dest) - { - if (dest.offset >= -32768 && dest.offset <= 32767 && !m_fixedWidth) { - /* - lw dataTemp, offset(base) - addu dataTemp, dataTemp, src - sw dataTemp, offset(base) - */ - m_assembler.lw(dataTempRegister, dest.base, dest.offset); - m_assembler.addu(dataTempRegister, dataTempRegister, src); - m_assembler.sw(dataTempRegister, dest.base, dest.offset); - } else { - /* - lui addrTemp, (offset + 0x8000) >> 16 - addu addrTemp, addrTemp, base - lw dataTemp, (offset & 0xffff)(addrTemp) - addu dataTemp, dataTemp, src - sw dataTemp, (offset & 0xffff)(addrTemp) - */ - m_assembler.lui(addrTempRegister, (dest.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, dest.base); - m_assembler.lw(dataTempRegister, addrTempRegister, dest.offset); - m_assembler.addu(dataTempRegister, dataTempRegister, src); - m_assembler.sw(dataTempRegister, addrTempRegister, dest.offset); - } - } - - void add32(TrustedImm32 imm, AbsoluteAddress address) - { - if (!m_fixedWidth) { - uintptr_t adr = reinterpret_cast(address.m_ptr); - m_assembler.lui(addrTempRegister, (adr + 
0x8000) >> 16); - m_assembler.lw(cmpTempRegister, addrTempRegister, adr & 0xffff); - if (imm.m_value >= -32768 && imm.m_value <= 32767) - m_assembler.addiu(dataTempRegister, cmpTempRegister, imm.m_value); - else { - move(imm, immTempRegister); - m_assembler.addu(dataTempRegister, cmpTempRegister, immTempRegister); - } - m_assembler.sw(dataTempRegister, addrTempRegister, adr & 0xffff); - } else { - /* - li addrTemp, address - li immTemp, imm - lw cmpTemp, 0(addrTemp) - addu dataTemp, cmpTemp, immTemp - sw dataTemp, 0(addrTemp) - */ - move(TrustedImmPtr(address.m_ptr), addrTempRegister); - m_assembler.lw(cmpTempRegister, addrTempRegister, 0); - move(imm, immTempRegister); - m_assembler.addu(dataTempRegister, cmpTempRegister, immTempRegister); - m_assembler.sw(dataTempRegister, addrTempRegister, 0); - } - } - - void add64(TrustedImm32 imm, AbsoluteAddress address) - { - if (!m_fixedWidth) { - uintptr_t adr = reinterpret_cast(address.m_ptr); - if ((adr >> 15) == ((adr + 4) >> 15)) { - m_assembler.lui(addrTempRegister, (adr + 0x8000) >> 16); - m_assembler.lw(cmpTempRegister, addrTempRegister, adr & 0xffff); - if (imm.m_value >= -32768 && imm.m_value <= 32767) - m_assembler.addiu(dataTempRegister, cmpTempRegister, imm.m_value); - else { - move(imm, immTempRegister); - m_assembler.addu(dataTempRegister, cmpTempRegister, immTempRegister); - } - m_assembler.sw(dataTempRegister, addrTempRegister, adr & 0xffff); - m_assembler.sltu(immTempRegister, dataTempRegister, cmpTempRegister); - m_assembler.lw(dataTempRegister, addrTempRegister, (adr + 4) & 0xffff); - if (imm.m_value >> 31) - m_assembler.addiu(dataTempRegister, dataTempRegister, -1); - m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister); - m_assembler.sw(dataTempRegister, addrTempRegister, (adr + 4) & 0xffff); - return; - } - } - /* - add32(imm, address) - sltu immTemp, dataTemp, cmpTemp # set carry-in bit - lw dataTemp, 4(addrTemp) - addiu dataTemp, imm.m_value >> 31 ? 
-1 : 0 - addu dataTemp, dataTemp, immTemp - sw dataTemp, 4(addrTemp) - */ - move(TrustedImmPtr(address.m_ptr), addrTempRegister); - m_assembler.lw(cmpTempRegister, addrTempRegister, 0); - if (imm.m_value >= -32768 && imm.m_value <= 32767 && !m_fixedWidth) - m_assembler.addiu(dataTempRegister, cmpTempRegister, imm.m_value); - else { - move(imm, immTempRegister); - m_assembler.addu(dataTempRegister, cmpTempRegister, immTempRegister); - } - m_assembler.sw(dataTempRegister, addrTempRegister, 0); - m_assembler.sltu(immTempRegister, dataTempRegister, cmpTempRegister); - m_assembler.lw(dataTempRegister, addrTempRegister, 4); - if (imm.m_value >> 31) - m_assembler.addiu(dataTempRegister, dataTempRegister, -1); - m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister); - m_assembler.sw(dataTempRegister, addrTempRegister, 4); - } - - void loadAddress(BaseIndex address, LoadAddressMode mode) - { - if (mode == LoadAddressMode::ScaleAndAddOffsetIfOffsetIsOutOfBounds) { - if (!address.scale) - m_assembler.addu(addrTempRegister, address.index, address.base); - else { - m_assembler.sll(addrTempRegister, address.index, address.scale); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - } - if (address.offset < -32768 || address.offset > 32767) { - m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister); - } - } else { - if (!address.scale) - m_assembler.addu(addrTempRegister, address.index, address.base); - else { - m_assembler.sll(addrTempRegister, address.index, address.scale); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - } - } - } - - void getEffectiveAddress(BaseIndex address, RegisterID dest) - { - if (!address.scale && !m_fixedWidth) - m_assembler.addu(dest, address.index, address.base); - else { - m_assembler.sll(addrTempRegister, address.index, address.scale); - m_assembler.addu(dest, addrTempRegister, address.base); - } - if (address.offset) - add32(TrustedImm32(address.offset), dest); - } - - void and16(Address src, RegisterID dest) - { - load16(src, dataTempRegister); - and32(dataTempRegister, dest); - } - - void and32(Address src, RegisterID dest) - { - load32(src, dataTempRegister); - and32(dataTempRegister, dest); - } - - void and32(RegisterID src, RegisterID dest) - { - m_assembler.andInsn(dest, dest, src); - } - - void and32(RegisterID op1, RegisterID op2, RegisterID dest) - { - m_assembler.andInsn(dest, op1, op2); - } - - void and32(TrustedImm32 imm, RegisterID dest) - { - if (!imm.m_value && !m_fixedWidth) - move(MIPSRegisters::zero, dest); - else if (imm.m_value > 0 && imm.m_value <= 65535 && !m_fixedWidth) - m_assembler.andi(dest, dest, imm.m_value); - else { - /* - li immTemp, imm - and dest, dest, immTemp - */ - move(imm, immTempRegister); - m_assembler.andInsn(dest, dest, immTempRegister); - } - } - - void and32(TrustedImm32 imm, RegisterID src, RegisterID dest) - { - if (!imm.m_value && !m_fixedWidth) - move(MIPSRegisters::zero, dest); - else if (imm.m_value > 0 && imm.m_value <= 65535 && !m_fixedWidth) - m_assembler.andi(dest, src, imm.m_value); - else { - move(imm, immTempRegister); - m_assembler.andInsn(dest, src, immTempRegister); - } - } - - void countLeadingZeros32(RegisterID src, RegisterID dest) - { -#if WTF_MIPS_ISA_AT_LEAST(32) - m_assembler.clz(dest, src); -#else - static_assert(false, "CLZ opcode is not available for this ISA"); -#endif - } - - void lshift32(RegisterID shiftAmount, RegisterID dest) - { - m_assembler.sllv(dest, 
dest, shiftAmount); - } - - void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest) - { - m_assembler.sllv(dest, src, shiftAmount); - } - - void lshift32(TrustedImm32 imm, RegisterID dest) - { - m_assembler.sll(dest, dest, imm.m_value); - } - - void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) - { - m_assembler.sll(dest, src, imm.m_value); - } - - void mul32(RegisterID src, RegisterID dest) - { - m_assembler.mul(dest, dest, src); - } - - void mul32(RegisterID op1, RegisterID op2, RegisterID dest) - { - m_assembler.mul(dest, op1, op2); - } - - void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest) - { - if (!imm.m_value && !m_fixedWidth) - move(MIPSRegisters::zero, dest); - else if (imm.m_value == 1 && !m_fixedWidth) - move(src, dest); - else { - /* - li dataTemp, imm - mul dest, src, dataTemp - */ - move(imm, dataTempRegister); - m_assembler.mul(dest, src, dataTempRegister); - } - } - - void neg32(RegisterID srcDest) - { - m_assembler.subu(srcDest, MIPSRegisters::zero, srcDest); - } - - void neg32(RegisterID src, RegisterID dest) - { - m_assembler.subu(dest, MIPSRegisters::zero, src); - } - - - void or8(TrustedImm32 imm, AbsoluteAddress dest) - { - if (!imm.m_value && !m_fixedWidth) - return; - - if (m_fixedWidth) { - load8(dest.m_ptr, immTempRegister); - or32(imm, immTempRegister); - store8(immTempRegister, dest.m_ptr); - } else { - uintptr_t adr = reinterpret_cast(dest.m_ptr); - m_assembler.lui(addrTempRegister, (adr + 0x8000) >> 16); - m_assembler.lbu(immTempRegister, addrTempRegister, adr & 0xffff); - or32(imm, immTempRegister); - m_assembler.sb(immTempRegister, addrTempRegister, adr & 0xffff); - } - } - - void or16(TrustedImm32 imm, AbsoluteAddress dest) - { - if (!imm.m_value && !m_fixedWidth) - return; - - if (m_fixedWidth) { - // TODO: Swap dataTempRegister and immTempRegister usage - load16(dest.m_ptr, immTempRegister); - or32(imm, immTempRegister); - store16(immTempRegister, dest.m_ptr); - } else { - uintptr_t adr = reinterpret_cast(dest.m_ptr); - m_assembler.lui(addrTempRegister, (adr + 0x8000) >> 16); - m_assembler.lhu(immTempRegister, addrTempRegister, adr & 0xffff); - or32(imm, immTempRegister); - m_assembler.sh(immTempRegister, addrTempRegister, adr & 0xffff); - } - } - - void or16(RegisterID mask, AbsoluteAddress dest) - { - load16(dest.m_ptr, immTempRegister); - or32(mask, immTempRegister); - store16(immTempRegister, dest.m_ptr); - } - - void or32(RegisterID src, RegisterID dest) - { - m_assembler.orInsn(dest, dest, src); - } - - void or32(RegisterID op1, RegisterID op2, RegisterID dest) - { - m_assembler.orInsn(dest, op1, op2); - } - - void or32(TrustedImm32 imm, AbsoluteAddress dest) - { - if (!imm.m_value && !m_fixedWidth) - return; - - if (m_fixedWidth) { - // TODO: Swap dataTempRegister and immTempRegister usage - load32(dest.m_ptr, immTempRegister); - or32(imm, immTempRegister); - store32(immTempRegister, dest.m_ptr); - } else { - uintptr_t adr = reinterpret_cast(dest.m_ptr); - m_assembler.lui(addrTempRegister, (adr + 0x8000) >> 16); - m_assembler.lw(immTempRegister, addrTempRegister, adr & 0xffff); - or32(imm, immTempRegister); - m_assembler.sw(immTempRegister, addrTempRegister, adr & 0xffff); - } - } - - void or32(TrustedImm32 imm, RegisterID dest) - { - if (!imm.m_value && !m_fixedWidth) - return; - - if (imm.m_value > 0 && imm.m_value <= 65535 - && !m_fixedWidth) { - m_assembler.ori(dest, dest, imm.m_value); - return; - } - - /* - li dataTemp, imm - or dest, dest, dataTemp - */ - move(imm, dataTempRegister); - 
m_assembler.orInsn(dest, dest, dataTempRegister);
-    }
-
-    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
-    {
-        if (!imm.m_value && !m_fixedWidth) {
-            move(src, dest);
-            return;
-        }
-
-        if (imm.m_value > 0 && imm.m_value <= 65535 && !m_fixedWidth) {
-            m_assembler.ori(dest, src, imm.m_value);
-            return;
-        }
-
-        /*
-            li   dataTemp, imm
-            or   dest, src, dataTemp
-        */
-        move(imm, dataTempRegister);
-        m_assembler.orInsn(dest, src, dataTempRegister);
-    }
-
-    void or32(RegisterID src, AbsoluteAddress dest)
-    {
-        if (m_fixedWidth) {
-            load32(dest.m_ptr, dataTempRegister);
-            m_assembler.orInsn(dataTempRegister, dataTempRegister, src);
-            store32(dataTempRegister, dest.m_ptr);
-        } else {
-            uintptr_t adr = reinterpret_cast<uintptr_t>(dest.m_ptr);
-            m_assembler.lui(addrTempRegister, (adr + 0x8000) >> 16);
-            m_assembler.lw(dataTempRegister, addrTempRegister, adr & 0xffff);
-            m_assembler.orInsn(dataTempRegister, dataTempRegister, src);
-            m_assembler.sw(dataTempRegister, addrTempRegister, adr & 0xffff);
-        }
-    }
-
-    void or32(TrustedImm32 imm, Address address)
-    {
-        if (address.offset >= -32768 && address.offset <= 32767
-            && !m_fixedWidth) {
-            /*
-                lw   dataTemp, offset(base)
-                li   immTemp, imm
-                or   dataTemp, dataTemp, immTemp
-                sw   dataTemp, offset(base)
-            */
-            m_assembler.lw(dataTempRegister, address.base, address.offset);
-            if (imm.m_value >= 0 && imm.m_value <= 65535 && !m_fixedWidth)
-                m_assembler.ori(dataTempRegister, dataTempRegister, imm.m_value);
-            else {
-                move(imm, immTempRegister);
-                m_assembler.orInsn(dataTempRegister, dataTempRegister, immTempRegister);
-            }
-            m_assembler.sw(dataTempRegister, address.base, address.offset);
-        } else {
-            /*
-                lui  addrTemp, (offset + 0x8000) >> 16
-                addu addrTemp, addrTemp, base
-                lw   dataTemp, (offset & 0xffff)(addrTemp)
-                li   immTemp, imm
-                or   dataTemp, dataTemp, immTemp
-                sw   dataTemp, (offset & 0xffff)(addrTemp)
-            */
-            m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
-            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
-            m_assembler.lw(dataTempRegister, addrTempRegister, address.offset);
-
-            if (imm.m_value >= 0 && imm.m_value <= 65535 && !m_fixedWidth)
-                m_assembler.ori(dataTempRegister, dataTempRegister, imm.m_value);
-            else {
-                move(imm, immTempRegister);
-                m_assembler.orInsn(dataTempRegister, dataTempRegister, immTempRegister);
-            }
-            m_assembler.sw(dataTempRegister, addrTempRegister, address.offset);
-        }
-    }
-
-    // This is only referenced by code intended for ARM64_32.
- void rotateRight32(TrustedImm32, RegisterID) - { - UNREACHABLE_FOR_PLATFORM(); - } - - void rshift32(RegisterID shiftAmount, RegisterID dest) - { - m_assembler.srav(dest, dest, shiftAmount); - } - - void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest) - { - m_assembler.srav(dest, src, shiftAmount); - } - - void rshift32(TrustedImm32 imm, RegisterID dest) - { - m_assembler.sra(dest, dest, imm.m_value); - } - - void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) - { - m_assembler.sra(dest, src, imm.m_value); - } - - void urshift32(RegisterID shiftAmount, RegisterID dest) - { - m_assembler.srlv(dest, dest, shiftAmount); - } - - void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest) - { - m_assembler.srlv(dest, src, shiftAmount); - } - - void urshift32(TrustedImm32 imm, RegisterID dest) - { - m_assembler.srl(dest, dest, imm.m_value); - } - - void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) - { - m_assembler.srl(dest, src, imm.m_value); - } - - void sub32(RegisterID src, RegisterID dest) - { - m_assembler.subu(dest, dest, src); - } - - void sub32(RegisterID op1, RegisterID op2, RegisterID dest) - { - m_assembler.subu(dest, op1, op2); - } - - void sub32(TrustedImm32 imm, RegisterID dest) - { - if (imm.m_value >= -32767 && imm.m_value <= 32768 - && !m_fixedWidth) { - /* - addiu dest, src, imm - */ - m_assembler.addiu(dest, dest, -imm.m_value); - } else { - /* - li immTemp, imm - subu dest, src, immTemp - */ - move(imm, immTempRegister); - m_assembler.subu(dest, dest, immTempRegister); - } - } - - void sub32(RegisterID src, TrustedImm32 imm, RegisterID dest) - { - if (imm.m_value >= -32767 && imm.m_value <= 32768 - && !m_fixedWidth) { - /* - addiu dest, src, imm - */ - m_assembler.addiu(dest, src, -imm.m_value); - } else { - /* - li immTemp, imm - subu dest, src, immTemp - */ - move(imm, immTempRegister); - m_assembler.subu(dest, src, immTempRegister); - } - } - - void sub32(TrustedImm32 imm, Address address) - { - if (address.offset >= -32768 && address.offset <= 32767 - && !m_fixedWidth) { - /* - lw dataTemp, offset(base) - li immTemp, imm - subu dataTemp, dataTemp, immTemp - sw dataTemp, offset(base) - */ - m_assembler.lw(dataTempRegister, address.base, address.offset); - if (imm.m_value >= -32767 && imm.m_value <= 32768 && !m_fixedWidth) - m_assembler.addiu(dataTempRegister, dataTempRegister, -imm.m_value); - else { - move(imm, immTempRegister); - m_assembler.subu(dataTempRegister, dataTempRegister, immTempRegister); - } - m_assembler.sw(dataTempRegister, address.base, address.offset); - } else { - /* - lui addrTemp, (offset + 0x8000) >> 16 - addu addrTemp, addrTemp, base - lw dataTemp, (offset & 0xffff)(addrTemp) - li immtemp, imm - subu dataTemp, dataTemp, immTemp - sw dataTemp, (offset & 0xffff)(addrTemp) - */ - m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lw(dataTempRegister, addrTempRegister, address.offset); - - if (imm.m_value >= -32767 && imm.m_value <= 32768 - && !m_fixedWidth) - m_assembler.addiu(dataTempRegister, dataTempRegister, -imm.m_value); - else { - move(imm, immTempRegister); - m_assembler.subu(dataTempRegister, dataTempRegister, immTempRegister); - } - m_assembler.sw(dataTempRegister, addrTempRegister, address.offset); - } - } - - void sub32(Address src, RegisterID dest) - { - load32(src, dataTempRegister); - sub32(dataTempRegister, dest); - } - - void sub32(TrustedImm32 imm, AbsoluteAddress address) - 
{ - if (!m_fixedWidth) { - uintptr_t adr = reinterpret_cast(address.m_ptr); - m_assembler.lui(addrTempRegister, (adr + 0x8000) >> 16); - m_assembler.lw(cmpTempRegister, addrTempRegister, adr & 0xffff); - if (imm.m_value >= -32767 && imm.m_value <= 32768) - m_assembler.addiu(dataTempRegister, cmpTempRegister, -imm.m_value); - else { - move(imm, immTempRegister); - m_assembler.subu(dataTempRegister, cmpTempRegister, immTempRegister); - } - m_assembler.sw(dataTempRegister, addrTempRegister, adr & 0xffff); - } else { - /* - li addrTemp, address - lw dataTemp, 0(addrTemp) - li immTemp, imm - subu dataTemp, dataTemp, immTemp - sw dataTemp, 0(addrTemp) - */ - move(TrustedImmPtr(address.m_ptr), addrTempRegister); - m_assembler.lw(cmpTempRegister, addrTempRegister, 0); - move(imm, immTempRegister); - m_assembler.subu(dataTempRegister, cmpTempRegister, immTempRegister); - m_assembler.sw(dataTempRegister, addrTempRegister, 0); - } - } - - void xor32(RegisterID src, RegisterID dest) - { - m_assembler.xorInsn(dest, dest, src); - } - - void xor32(RegisterID op1, RegisterID op2, RegisterID dest) - { - m_assembler.xorInsn(dest, op1, op2); - } - - void xor32(Address src, RegisterID dest) - { - load32(src, dataTempRegister); - xor32(dataTempRegister, dest); - } - - void xor32(TrustedImm32 imm, RegisterID dest) - { - if (!m_fixedWidth) { - if (imm.m_value == -1) { - m_assembler.nor(dest, dest, MIPSRegisters::zero); - return; - } - if (imm.m_value >= 0 && imm.m_value <= 65535) { - m_assembler.xori(dest, dest, imm.m_value); - return; - } - } - /* - li immTemp, imm - xor dest, dest, immTemp - */ - move(imm, immTempRegister); - m_assembler.xorInsn(dest, dest, immTempRegister); - } - - void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest) - { - if (!m_fixedWidth) { - if (imm.m_value == -1) { - m_assembler.nor(dest, src, MIPSRegisters::zero); - return; - } - if (imm.m_value >= 0 && imm.m_value <= 65535) { - m_assembler.xori(dest, src, imm.m_value); - return; - } - } - /* - li immTemp, imm - xor dest, src, immTemp - */ - move(imm, immTempRegister); - m_assembler.xorInsn(dest, src, immTempRegister); - } - - void not32(RegisterID srcDest) - { - m_assembler.nor(srcDest, srcDest, MIPSRegisters::zero); - } - - void sqrtDouble(FPRegisterID src, FPRegisterID dst) - { - m_assembler.sqrtd(dst, src); - } - - void absDouble(FPRegisterID src, FPRegisterID dst) - { - m_assembler.absd(dst, src); - } - - NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID) - { - ASSERT(!supportsFloatingPointRounding()); - CRASH(); - } - - NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID) - { - ASSERT(!supportsFloatingPointRounding()); - CRASH(); - } - - NO_RETURN_DUE_TO_CRASH void roundTowardZeroDouble(FPRegisterID, FPRegisterID) - { - ASSERT(!supportsFloatingPointRounding()); - CRASH(); - } - - // this is provided (unlike the other rounding instructions) since it is - // used in a more limited fashion (for Uint8ClampedArray)--its range is - // limited to doubles that round to a 32-bit signed int--otherwise, it will - // saturate (and signal an FP exception [which is non-trapping]) - void roundTowardNearestIntDouble(FPRegisterID src, FPRegisterID dest) - { - m_assembler.roundwd(dest, src); - m_assembler.cvtdw(dest, dest); - } - - ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) - { - ConvertibleLoadLabel result(this); - /* - lui addrTemp, (offset + 0x8000) >> 16 - addu addrTemp, addrTemp, base - lw dest, (offset & 0xffff)(addrTemp) - */ - m_assembler.lui(addrTempRegister, 
(address.offset + 0x8000) >> 16);
-        m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
-        m_assembler.lw(dest, addrTempRegister, address.offset);
-        return result;
-    }
-
-    // Memory access operations:
-    //
-    // Loads are of the form load(address, destination) and stores of the form
-    // store(source, address). The source for a store may be a TrustedImm32. Address
-    // operand objects to loads and stores will be implicitly constructed if a
-    // register is passed.
-
-    /* Need to use zero-extended load byte for load8. */
-    void load8(Address address, RegisterID dest)
-    {
-        if (address.offset >= -32768 && address.offset <= 32767
-            && !m_fixedWidth)
-            m_assembler.lbu(dest, address.base, address.offset);
-        else {
-            /*
-                lui  addrTemp, (offset + 0x8000) >> 16
-                addu addrTemp, addrTemp, base
-                lbu  dest, (offset & 0xffff)(addrTemp)
-            */
-            m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
-            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
-            m_assembler.lbu(dest, addrTempRegister, address.offset);
-        }
-    }
-
-    void load8(BaseIndex address, RegisterID dest)
-    {
-        if (!m_fixedWidth) {
-            loadAddress(address, LoadAddressMode::ScaleAndAddOffsetIfOffsetIsOutOfBounds);
-            m_assembler.lbu(dest, addrTempRegister, address.offset);
-        } else {
-            /*
-                sll  addrTemp, address.index, address.scale
-                addu addrTemp, addrTemp, address.base
-                lui  immTemp, (address.offset + 0x8000) >> 16
-                addu addrTemp, addrTemp, immTemp
-                lbu  dest, (address.offset & 0xffff)(at)
-            */
-            m_assembler.sll(addrTempRegister, address.index, address.scale);
-            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
-            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
-            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
-            m_assembler.lbu(dest, addrTempRegister, address.offset);
-        }
-    }
-
-    ALWAYS_INLINE void load8(AbsoluteAddress address, RegisterID dest)
-    {
-        load8(address.m_ptr, dest);
-    }
-
-    void load8(const void* address, RegisterID dest)
-    {
-        if (m_fixedWidth) {
-            /*
-                li  addrTemp, address
-                lbu dest, 0(addrTemp)
-            */
-            move(TrustedImmPtr(address), addrTempRegister);
-            m_assembler.lbu(dest, addrTempRegister, 0);
-        } else {
-            uintptr_t adr = reinterpret_cast<uintptr_t>(address);
-            m_assembler.lui(addrTempRegister, (adr + 0x8000) >> 16);
-            m_assembler.lbu(dest, addrTempRegister, adr & 0xffff);
-        }
-    }
-
-    void load8SignedExtendTo32(Address address, RegisterID dest)
-    {
-        if (address.offset >= -32768 && address.offset <= 32767
-            && !m_fixedWidth)
-            m_assembler.lb(dest, address.base, address.offset);
-        else {
-            /*
-                lui  addrTemp, (offset + 0x8000) >> 16
-                addu addrTemp, addrTemp, base
-                lb   dest, (offset & 0xffff)(addrTemp)
-            */
-            m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
-            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
-            m_assembler.lb(dest, addrTempRegister, address.offset);
-        }
-    }
-
-    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
-    {
-        if (!m_fixedWidth) {
-            loadAddress(address, LoadAddressMode::ScaleAndAddOffsetIfOffsetIsOutOfBounds);
-            m_assembler.lb(dest, addrTempRegister, address.offset);
-        } else {
-            /*
-                sll  addrTemp, address.index, address.scale
-                addu addrTemp, addrTemp, address.base
-                lui  immTemp, (address.offset + 0x8000) >> 16
-                addu addrTemp, addrTemp, immTemp
-                lb   dest, (address.offset & 0xffff)(at)
-            */
-            m_assembler.sll(addrTempRegister, address.index, address.scale);
-            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
-            m_assembler.lui(immTempRegister,
(address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister); - m_assembler.lb(dest, addrTempRegister, address.offset); - } - } - - ALWAYS_INLINE void load8SignedExtendTo32(AbsoluteAddress address, RegisterID dest) - { - load8SignedExtendTo32(address.m_ptr, dest); - } - - void load8SignedExtendTo32(const void* address, RegisterID dest) - { - if (m_fixedWidth) { - /* - li addrTemp, address - lb dest, 0(addrTemp) - */ - move(TrustedImmPtr(address), addrTempRegister); - m_assembler.lb(dest, addrTempRegister, 0); - } else { - uintptr_t adr = reinterpret_cast(address); - m_assembler.lui(addrTempRegister, (adr + 0x8000) >> 16); - m_assembler.lb(dest, addrTempRegister, adr & 0xffff); - } - } - - - void load32(Address address, RegisterID dest) - { - if (address.offset >= -32768 && address.offset <= 32767 - && !m_fixedWidth) - m_assembler.lw(dest, address.base, address.offset); - else { - /* - lui addrTemp, (offset + 0x8000) >> 16 - addu addrTemp, addrTemp, base - lw dest, (offset & 0xffff)(addrTemp) - */ - m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lw(dest, addrTempRegister, address.offset); - } - } - - void load32(BaseIndex address, RegisterID dest) - { - if (!m_fixedWidth) { - loadAddress(address, LoadAddressMode::ScaleAndAddOffsetIfOffsetIsOutOfBounds); - m_assembler.lw(dest, addrTempRegister, address.offset); - } else { - /* - sll addrTemp, address.index, address.scale - addu addrTemp, addrTemp, address.base - lui immTemp, (address.offset + 0x8000) >> 16 - addu addrTemp, addrTemp, immTemp - lw dest, (address.offset & 0xffff)(at) - */ - m_assembler.sll(addrTempRegister, address.index, address.scale); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister); - m_assembler.lw(dest, addrTempRegister, address.offset); - } - } - - void load16Unaligned(BaseIndex address, RegisterID dest) - { - if (address.offset >= -32768 && address.offset <= 32767 && !m_fixedWidth) { - /* - sll addrtemp, address.index, address.scale - addu addrtemp, addrtemp, address.base - lbu immTemp, address.offset+x(addrtemp) (x=0 for LE, x=1 for BE) - lbu dest, address.offset+x(addrtemp) (x=1 for LE, x=0 for BE) - sll dest, dest, 8 - or dest, dest, immTemp - */ - loadAddress(address, LoadAddressMode::Scale); -#if CPU(BIG_ENDIAN) - m_assembler.lbu(immTempRegister, addrTempRegister, address.offset + 1); - m_assembler.lbu(dest, addrTempRegister, address.offset); -#else - m_assembler.lbu(immTempRegister, addrTempRegister, address.offset); - m_assembler.lbu(dest, addrTempRegister, address.offset + 1); -#endif - m_assembler.sll(dest, dest, 8); - m_assembler.orInsn(dest, dest, immTempRegister); - } else { - /* - sll addrTemp, address.index, address.scale - addu addrTemp, addrTemp, address.base - lui immTemp, address.offset >> 16 - ori immTemp, immTemp, address.offset & 0xffff - addu addrTemp, addrTemp, immTemp - lbu immTemp, x(addrtemp) (x=0 for LE, x=1 for BE) - lbu dest, x(addrtemp) (x=1 for LE, x=0 for BE) - sll dest, dest, 8 - or dest, dest, immTemp - */ - m_assembler.sll(addrTempRegister, address.index, address.scale); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lui(immTempRegister, address.offset >> 16); - m_assembler.ori(immTempRegister, immTempRegister, address.offset); - 
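// [Editor's note] ori's immediate field is only 16 bits wide, so the lui/ori
// pair above rebuilds the full 32-bit offset (lui installs the high half, ori
// merges the low half); the addu below then forms the final address.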
m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister); -#if CPU(BIG_ENDIAN) - m_assembler.lbu(immTempRegister, addrTempRegister, 1); - m_assembler.lbu(dest, addrTempRegister, 0); -#else - m_assembler.lbu(immTempRegister, addrTempRegister, 0); - m_assembler.lbu(dest, addrTempRegister, 1); -#endif - m_assembler.sll(dest, dest, 8); - m_assembler.orInsn(dest, dest, immTempRegister); - } - } - - void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest) - { - if (address.offset >= -32768 && address.offset <= 32764 - && !m_fixedWidth) { - /* - sll addrTemp, address.index, address.scale - addu addrTemp, addrTemp, address.base - (Big-Endian) - lwl dest, address.offset(addrTemp) - lwr dest, address.offset+3(addrTemp) - (Little-Endian) - lwl dest, address.offset+3(addrTemp) - lwr dest, address.offset(addrTemp) - */ - loadAddress(address, LoadAddressMode::Scale); -#if CPU(BIG_ENDIAN) - m_assembler.lwl(dest, addrTempRegister, address.offset); - m_assembler.lwr(dest, addrTempRegister, address.offset + 3); -#else - m_assembler.lwl(dest, addrTempRegister, address.offset + 3); - m_assembler.lwr(dest, addrTempRegister, address.offset); - -#endif - } else { - /* - sll addrTemp, address.index, address.scale - addu addrTemp, addrTemp, address.base - lui immTemp, address.offset >> 16 - ori immTemp, immTemp, address.offset & 0xffff - addu addrTemp, addrTemp, immTemp - (Big-Endian) - lw dest, 0(at) - lw dest, 3(at) - (Little-Endian) - lw dest, 3(at) - lw dest, 0(at) - */ - m_assembler.sll(addrTempRegister, address.index, address.scale); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lui(immTempRegister, address.offset >> 16); - m_assembler.ori(immTempRegister, immTempRegister, address.offset); - m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister); -#if CPU(BIG_ENDIAN) - m_assembler.lwl(dest, addrTempRegister, 0); - m_assembler.lwr(dest, addrTempRegister, 3); -#else - m_assembler.lwl(dest, addrTempRegister, 3); - m_assembler.lwr(dest, addrTempRegister, 0); -#endif - } - } - - void load32(const void* address, RegisterID dest) - { - if (m_fixedWidth) { - /* - li addrTemp, address - lw dest, 0(addrTemp) - */ - move(TrustedImmPtr(address), addrTempRegister); - m_assembler.lw(dest, addrTempRegister, 0); - } else { - uintptr_t adr = reinterpret_cast(address); - m_assembler.lui(addrTempRegister, (adr + 0x8000) >> 16); - m_assembler.lw(dest, addrTempRegister, adr & 0xffff); - } - } - - void load16(const void* address, RegisterID dest) - { - if (m_fixedWidth) { - /* - li addrTemp, address - lhu dest, 0(addrTemp) - */ - move(TrustedImmPtr(address), addrTempRegister); - m_assembler.lhu(dest, addrTempRegister, 0); - } else { - uintptr_t adr = reinterpret_cast(address); - m_assembler.lui(addrTempRegister, (adr + 0x8000) >> 16); - m_assembler.lhu(dest, addrTempRegister, adr & 0xffff); - } - } - - /* Need to use zero-extened load half-word for load16. */ - void load16(Address address, RegisterID dest) - { - if (address.offset >= -32768 && address.offset <= 32767 - && !m_fixedWidth) - m_assembler.lhu(dest, address.base, address.offset); - else { - /* - lui addrTemp, (offset + 0x8000) >> 16 - addu addrTemp, addrTemp, base - lhu dest, (offset & 0xffff)(addrTemp) - */ - m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lhu(dest, addrTempRegister, address.offset); - } - } - - /* Need to use zero-extened load half-word for load16. 
*/ - void load16(BaseIndex address, RegisterID dest) - { - if (!m_fixedWidth) { - loadAddress(address, LoadAddressMode::ScaleAndAddOffsetIfOffsetIsOutOfBounds); - m_assembler.lhu(dest, addrTempRegister, address.offset); - } else { - /* - sll addrTemp, address.index, address.scale - addu addrTemp, addrTemp, address.base - lui immTemp, (address.offset + 0x8000) >> 16 - addu addrTemp, addrTemp, immTemp - lhu dest, (address.offset & 0xffff)(addrTemp) - */ - m_assembler.sll(addrTempRegister, address.index, address.scale); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister); - m_assembler.lhu(dest, addrTempRegister, address.offset); - } - } - - ALWAYS_INLINE void load16(AbsoluteAddress address, RegisterID dest) - { - load16(address.m_ptr, dest); - } - - /* Need to use zero-extened load half-word for load16. */ - void load16SignedExtendTo32(Address address, RegisterID dest) - { - if (address.offset >= -32768 && address.offset <= 32767 - && !m_fixedWidth) - m_assembler.lh(dest, address.base, address.offset); - else { - /* - lui addrTemp, (offset + 0x8000) >> 16 - addu addrTemp, addrTemp, base - lh dest, (offset & 0xffff)(addrTemp) - */ - m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lh(dest, addrTempRegister, address.offset); - } - } - - void load16SignedExtendTo32(BaseIndex address, RegisterID dest) - { - if (!m_fixedWidth) { - loadAddress(address, LoadAddressMode::ScaleAndAddOffsetIfOffsetIsOutOfBounds); - m_assembler.lh(dest, addrTempRegister, address.offset); - } else { - /* - sll addrTemp, address.index, address.scale - addu addrTemp, addrTemp, address.base - lui immTemp, (address.offset + 0x8000) >> 16 - addu addrTemp, addrTemp, immTemp - lh dest, (address.offset & 0xffff)(addrTemp) - */ - m_assembler.sll(addrTempRegister, address.index, address.scale); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister); - m_assembler.lh(dest, addrTempRegister, address.offset); - } - } - - ALWAYS_INLINE void load16SignedExtendTo32(AbsoluteAddress address, RegisterID dest) - { - load16SignedExtendTo32(address.m_ptr, dest); - } - - void load16SignedExtendTo32(const void* address, RegisterID dest) - { - if (m_fixedWidth) { - /* - li addrTemp, address - lh dest, 0(addrTemp) - */ - move(TrustedImmPtr(address), addrTempRegister); - m_assembler.lh(dest, addrTempRegister, 0); - } else { - uintptr_t adr = reinterpret_cast(address); - m_assembler.lui(addrTempRegister, (adr + 0x8000) >> 16); - m_assembler.lh(dest, addrTempRegister, adr & 0xffff); - } - } - - - void loadPair32(RegisterID src, RegisterID dest1, RegisterID dest2) - { - loadPair32(src, TrustedImm32(0), dest1, dest2); - } - - void loadPair32(RegisterID src, TrustedImm32 offset, RegisterID dest1, RegisterID dest2) - { - loadPair32(Address(src, offset.m_value), dest1, dest2); - } - - void loadPair32(Address address, RegisterID dest1, RegisterID dest2) - { - ASSERT(dest1 != dest2); // If it is the same, ldp becomes illegal instruction. 
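// [Editor's note] Load order matters below: if the base register is also the
// first destination, the first load would clobber the base before the second
// load could use it, so the high word (offset + 4) is loaded first in that case.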
- if (address.base == dest1) { - load32(address.withOffset(4), dest2); - load32(address, dest1); - } else { - load32(address, dest1); - load32(address.withOffset(4), dest2); - } - } - - void loadPair32(AbsoluteAddress address, RegisterID dest1, RegisterID dest2) - { - move(TrustedImmPtr(address.m_ptr), addrTempRegister); - loadPair32(addrTempRegister, dest1, dest2); - } - - void loadPair32(BaseIndex address, RegisterID dest1, RegisterID dest2) - { - if (address.base == dest1 || address.index == dest1) { - RELEASE_ASSERT(address.base != dest2); - RELEASE_ASSERT(address.index != dest2); - - load32(address.withOffset(4), dest2); - load32(address, dest1); - } else { - load32(address, dest1); - load32(address.withOffset(4), dest2); - } - } - - void store8(RegisterID src, Address address) - { - if (address.offset >= -32768 && address.offset <= 32767 - && !m_fixedWidth) - m_assembler.sb(src, address.base, address.offset); - else { - /* - lui addrTemp, (offset + 0x8000) >> 16 - addu addrTemp, addrTemp, base - sb src, (offset & 0xffff)(addrTemp) - */ - m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.sb(src, addrTempRegister, address.offset); - } - } - - void store8(RegisterID src, BaseIndex address) - { - if (!m_fixedWidth) { - loadAddress(address, LoadAddressMode::ScaleAndAddOffsetIfOffsetIsOutOfBounds); - m_assembler.sb(src, addrTempRegister, address.offset); - } else { - /* - sll addrTemp, address.index, address.scale - addu addrTemp, addrTemp, address.base - lui immTemp, (address.offset + 0x8000) >> 16 - addu addrTemp, addrTemp, immTemp - sb src, (address.offset & 0xffff)(at) - */ - m_assembler.sll(addrTempRegister, address.index, address.scale); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister); - m_assembler.sb(src, addrTempRegister, address.offset); - } - } - - void store8(RegisterID src, const void* address) - { - if (m_fixedWidth) { - /* - li addrTemp, address - sb src, 0(addrTemp) - */ - move(TrustedImmPtr(address), addrTempRegister); - m_assembler.sb(src, addrTempRegister, 0); - } else { - uintptr_t adr = reinterpret_cast(address); - m_assembler.lui(addrTempRegister, (adr + 0x8000) >> 16); - m_assembler.sb(src, addrTempRegister, adr & 0xffff); - } - } - - void store8(TrustedImm32 imm, void* address) - { - if (m_fixedWidth) { - /* - li immTemp, imm - li addrTemp, address - sb src, 0(addrTemp) - */ - TrustedImm32 imm8(static_cast(imm.m_value)); - move(imm8, immTempRegister); - move(TrustedImmPtr(address), addrTempRegister); - m_assembler.sb(immTempRegister, addrTempRegister, 0); - } else { - uintptr_t adr = reinterpret_cast(address); - m_assembler.lui(addrTempRegister, (adr + 0x8000) >> 16); - if (!imm.m_value) - m_assembler.sb(MIPSRegisters::zero, addrTempRegister, adr & 0xffff); - else { - TrustedImm32 imm8(static_cast(imm.m_value)); - move(imm8, immTempRegister); - m_assembler.sb(immTempRegister, addrTempRegister, adr & 0xffff); - } - } - } - - void store8(TrustedImm32 imm, Address address) - { - TrustedImm32 imm8(static_cast(imm.m_value)); - if (address.offset >= -32768 && address.offset <= 32767 - && !m_fixedWidth) { - if (!imm8.m_value) - m_assembler.sb(MIPSRegisters::zero, address.base, address.offset); - else { - move(imm8, immTempRegister); - m_assembler.sb(immTempRegister, address.base, address.offset); - } - } else { - 
/* - lui addrTemp, (offset + 0x8000) >> 16 - addu addrTemp, addrTemp, base - sb immTemp, (offset & 0xffff)(addrTemp) - */ - m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - if (!imm8.m_value && !m_fixedWidth) - m_assembler.sb(MIPSRegisters::zero, addrTempRegister, address.offset); - else { - move(imm8, immTempRegister); - m_assembler.sb(immTempRegister, addrTempRegister, address.offset); - } - } - } - - void store16(RegisterID src, const void* address) - { - if (m_fixedWidth) { - /* - li addrTemp, address - sh src, 0(addrTemp) - */ - move(TrustedImmPtr(address), addrTempRegister); - m_assembler.sh(src, addrTempRegister, 0); - } else { - uintptr_t adr = reinterpret_cast(address); - m_assembler.lui(addrTempRegister, (adr + 0x8000) >> 16); - m_assembler.sh(src, addrTempRegister, adr & 0xffff); - } - } - - void store16(RegisterID src, Address address) - { - if (address.offset >= -32768 && address.offset <= 32767 - && !m_fixedWidth) { - m_assembler.sh(src, address.base, address.offset); - } else { - /* - lui addrTemp, (offset + 0x8000) >> 16 - addu addrTemp, addrTemp, base - sh src, (offset & 0xffff)(addrTemp) - */ - m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.sh(src, addrTempRegister, address.offset); - } - } - - void store16(RegisterID src, BaseIndex address) - { - if (!m_fixedWidth) { - loadAddress(address, LoadAddressMode::ScaleAndAddOffsetIfOffsetIsOutOfBounds); - m_assembler.sh(src, addrTempRegister, address.offset); - } else { - /* - sll addrTemp, address.index, address.scale - addu addrTemp, addrTemp, address.base - lui immTemp, (address.offset + 0x8000) >> 16 - addu addrTemp, addrTemp, immTemp - sh src, (address.offset & 0xffff)(at) - */ - m_assembler.sll(addrTempRegister, address.index, address.scale); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister); - m_assembler.sh(src, addrTempRegister, address.offset); - } - } - - void store32(RegisterID src, Address address) - { - if (address.offset >= -32768 && address.offset <= 32767 - && !m_fixedWidth) - m_assembler.sw(src, address.base, address.offset); - else { - /* - lui addrTemp, (offset + 0x8000) >> 16 - addu addrTemp, addrTemp, base - sw src, (offset & 0xffff)(addrTemp) - */ - m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.sw(src, addrTempRegister, address.offset); - } - } - - void store32(RegisterID src, BaseIndex address) - { - if (!m_fixedWidth) { - loadAddress(address, LoadAddressMode::ScaleAndAddOffsetIfOffsetIsOutOfBounds); - m_assembler.sw(src, addrTempRegister, address.offset); - } else { - /* - sll addrTemp, address.index, address.scale - addu addrTemp, addrTemp, address.base - lui immTemp, (address.offset + 0x8000) >> 16 - addu addrTemp, addrTemp, immTemp - sw src, (address.offset & 0xffff)(at) - */ - m_assembler.sll(addrTempRegister, address.index, address.scale); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister); - m_assembler.sw(src, addrTempRegister, address.offset); - } - } - - void store32(TrustedImm32 
imm, Address address) - { - if (address.offset >= -32768 && address.offset <= 32767 - && !m_fixedWidth) { - if (!imm.m_value) - m_assembler.sw(MIPSRegisters::zero, address.base, address.offset); - else { - move(imm, immTempRegister); - m_assembler.sw(immTempRegister, address.base, address.offset); - } - } else { - /* - lui addrTemp, (offset + 0x8000) >> 16 - addu addrTemp, addrTemp, base - sw immTemp, (offset & 0xffff)(addrTemp) - */ - m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - if (!imm.m_value && !m_fixedWidth) - m_assembler.sw(MIPSRegisters::zero, addrTempRegister, address.offset); - else { - move(imm, immTempRegister); - m_assembler.sw(immTempRegister, addrTempRegister, address.offset); - } - } - } - - void store32(TrustedImm32 imm, BaseIndex address) - { - if (!m_fixedWidth) { - loadAddress(address, LoadAddressMode::ScaleAndAddOffsetIfOffsetIsOutOfBounds); - if (!imm.m_value) - m_assembler.sw(MIPSRegisters::zero, addrTempRegister, address.offset); - else { - move(imm, immTempRegister); - m_assembler.sw(immTempRegister, addrTempRegister, address.offset); - } - } else { - /* - sll addrTemp, address.index, address.scale - addu addrTemp, addrTemp, address.base - lui immTemp, (address.offset + 0x8000) >> 16 - addu addrTemp, addrTemp, immTemp - sw src, (address.offset & 0xffff)(at) - */ - m_assembler.sll(addrTempRegister, address.index, address.scale); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister); - move(imm, immTempRegister); - m_assembler.sw(immTempRegister, addrTempRegister, address.offset); - } - } - - - void store32(RegisterID src, const void* address) - { - if (m_fixedWidth) { - /* - li addrTemp, address - sw src, 0(addrTemp) - */ - move(TrustedImmPtr(address), addrTempRegister); - m_assembler.sw(src, addrTempRegister, 0); - } else { - uintptr_t adr = reinterpret_cast(address); - m_assembler.lui(addrTempRegister, (adr + 0x8000) >> 16); - m_assembler.sw(src, addrTempRegister, adr & 0xffff); - } - } - - void store32(TrustedImm32 imm, const void* address) - { - if (m_fixedWidth) { - /* - li immTemp, imm - li addrTemp, address - sw src, 0(addrTemp) - */ - move(imm, immTempRegister); - move(TrustedImmPtr(address), addrTempRegister); - m_assembler.sw(immTempRegister, addrTempRegister, 0); - } else { - uintptr_t adr = reinterpret_cast(address); - m_assembler.lui(addrTempRegister, (adr + 0x8000) >> 16); - if (!imm.m_value) - m_assembler.sw(MIPSRegisters::zero, addrTempRegister, adr & 0xffff); - else { - move(imm, immTempRegister); - m_assembler.sw(immTempRegister, addrTempRegister, adr & 0xffff); - } - } - } - - void storePair32(RegisterID src1, TrustedImm32 imm, Address address) - { - move(imm, addrTempRegister); - storePair32(src1, addrTempRegister, address); - } - - void storePair32(TrustedImmPtr immPtr, TrustedImm32 imm32, Address address) - { - move(immPtr, addrTempRegister); - move(imm32, dataTempRegister); - storePair32(addrTempRegister, dataTempRegister, address); - } - - void storePair32(TrustedImm32 imm1, TrustedImm32 imm2, Address address) - { - move(imm1, addrTempRegister); - RegisterID scratch = addrTempRegister; - if (imm1.m_value != imm2.m_value) { - scratch = dataTempRegister; - move(imm2, scratch); - } - storePair32(addrTempRegister, scratch, address); - } - - void storePair32(RegisterID src1, RegisterID src2, 
RegisterID dest) - { - storePair32(src1, src2, dest, TrustedImm32(0)); - } - - void storePair32(RegisterID src1, RegisterID src2, RegisterID dest, TrustedImm32 offset) - { - storePair32(src1, src2, Address(dest, offset.m_value)); - } - - void storePair32(RegisterID src1, RegisterID src2, Address address) - { - store32(src1, address); - store32(src2, address.withOffset(4)); - } - - void storePair32(RegisterID src1, RegisterID src2, BaseIndex address) - { - store32(src1, address); - store32(src2, address.withOffset(4)); - } - - void storePair32(TrustedImm32 imm1, TrustedImm32 imm2, BaseIndex address) - { - store32(imm1, address); - store32(imm2, address.withOffset(4)); - } - - void storePair32(RegisterID src1, TrustedImm32 imm, const void* address) - { - move(TrustedImmPtr(address), addrTempRegister); - move(imm, dataTempRegister); - storePair32(src1, dataTempRegister, addrTempRegister); - } - - void storePair32(RegisterID src1, RegisterID src2, const void* address) - { - move(TrustedImmPtr(address), addrTempRegister); - storePair32(src1, src2, addrTempRegister); - } - - // Floating-point operations: - - static bool supportsFloatingPoint() - { -#if WTF_MIPS_DOUBLE_FLOAT - return true; -#else - return false; -#endif - } - - static bool supportsFloatingPointTruncate() - { -#if WTF_MIPS_DOUBLE_FLOAT && WTF_MIPS_ISA_AT_LEAST(2) - return true; -#else - return false; -#endif - } - - static bool supportsFloatingPointSqrt() - { -#if WTF_MIPS_DOUBLE_FLOAT && WTF_MIPS_ISA_AT_LEAST(2) - return true; -#else - return false; -#endif - } - - static bool supportsFloatingPointAbs() - { -#if WTF_MIPS_DOUBLE_FLOAT && WTF_MIPS_ISA_AT_LEAST(2) - return true; -#else - return false; -#endif - } - - static bool supportsFloatingPointRounding() { return false; } - - // Stack manipulation operations: - // - // The ABI is assumed to provide a stack abstraction to memory, - // containing machine word sized units of data. Push and pop - // operations add and remove a single register sized unit of data - // to or from the stack. Peek and poke operations read or write - // values on the stack, without moving the current stack position. - - void pop(RegisterID dest) - { - m_assembler.lw(dest, MIPSRegisters::sp, 0); - m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, 4); - } - - void popPair(RegisterID dest1, RegisterID dest2) - { - m_assembler.lw(dest1, MIPSRegisters::sp, 0); - m_assembler.lw(dest2, MIPSRegisters::sp, 4); - m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, 8); - } - - void push(RegisterID src) - { - m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, -4); - m_assembler.sw(src, MIPSRegisters::sp, 0); - } - - void push(Address address) - { - load32(address, dataTempRegister); - push(dataTempRegister); - } - - void push(TrustedImm32 imm) - { - move(imm, immTempRegister); - push(immTempRegister); - } - - void pushPair(RegisterID src1, RegisterID src2) - { - m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, -8); - m_assembler.sw(src2, MIPSRegisters::sp, 4); - m_assembler.sw(src1, MIPSRegisters::sp, 0); - } - - // Register move operations: - // - // Move values in registers. 
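// [Editor's note] A minimal standalone sketch, not part of the original file,
// of how the fixed-width path of move() below materializes a 32-bit constant:
// lui installs the upper 16 bits, ori then merges the lower 16. The helper
// name splitImmediate is hypothetical.
// --- illustrative sketch begin ---
#include <cstdint>
#include <utility>

// Returns the (lui, ori) 16-bit halves for a 32-bit immediate.
inline std::pair<uint16_t, uint16_t> splitImmediate(int32_t value)
{
    uint32_t bits = static_cast<uint32_t>(value);
    return { uint16_t(bits >> 16), uint16_t(bits & 0xffff) };
}

// splitImmediate(0x12345678) yields {0x1234, 0x5678}; emitting
// "lui dest, 0x1234" then "ori dest, dest, 0x5678" reconstructs the constant
// in exactly two instructions, keeping the sequence length fixed so that
// patchable code stays the same size regardless of the immediate's value.
// --- illustrative sketch end ---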
-
-    void move(TrustedImm32 imm, RegisterID dest)
-    {
-        if (!imm.m_value && !m_fixedWidth)
-            move(MIPSRegisters::zero, dest);
-        else if (m_fixedWidth) {
-            m_assembler.lui(dest, imm.m_value >> 16);
-            m_assembler.ori(dest, dest, imm.m_value);
-        } else
-            m_assembler.li(dest, imm.m_value);
-    }
-
-    void move(RegisterID src, RegisterID dest)
-    {
-        if (src != dest || m_fixedWidth)
-            m_assembler.move(dest, src);
-    }
-
-    void move(TrustedImmPtr imm, RegisterID dest)
-    {
-        move(TrustedImm32(imm), dest);
-    }
-
-    void swap(RegisterID reg1, RegisterID reg2)
-    {
-        move(reg1, immTempRegister);
-        move(reg2, reg1);
-        move(immTempRegister, reg2);
-    }
-
-    void signExtend32ToPtr(RegisterID src, RegisterID dest)
-    {
-        if (src != dest || m_fixedWidth)
-            move(src, dest);
-    }
-
-    void zeroExtend32ToWord(RegisterID src, RegisterID dest)
-    {
-        if (src != dest || m_fixedWidth)
-            move(src, dest);
-    }
-
-    // Forwards / external control flow operations:
-    //
-    // This set of jump and conditional branch operations returns a Jump
-    // object which may be linked at a later point, allowing forward jumps,
-    // or jumps that will require external linkage (after the code has been
-    // relocated).
-    //
-    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
-    // respectively; for unsigned comparisons the names b, a, be, and ae are
-    // used (representing the names 'below' and 'above').
-    //
-    // Operands to the comparison are provided in the expected order, e.g.
-    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
-    // treated as a signed 32-bit value, is less than or equal to 5.
-    //
-    // jz and jnz test whether the first operand is equal to zero, and take
-    // an optional second operand of a mask under which to perform the test.
-
-    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
-    {
-        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
-        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, dataTempRegister);
-        return branch32(cond, dataTempRegister, right8);
-    }
-
-    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
-    {
-        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
-        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, dataTempRegister);
-        return branch32(cond, dataTempRegister, right8);
-    }
-
-    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
-    {
-        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
-        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, dataTempRegister);
-        compare32(cond, dataTempRegister, right8, dest);
-    }
-
-    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
-    {
-        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
-        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, dataTempRegister);
-        return branch32(cond, dataTempRegister, right8);
-    }
-
-    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
-    {
-        load32(left, dataTempRegister);
-        return branch32(cond, dataTempRegister, right);
-    }
-
-    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
-    {
-        if (cond == Equal)
-            return branchEqual(left, right);
-        if (cond == NotEqual)
-            return branchNotEqual(left, right);
-        if (cond == Above) {
-            m_assembler.sltu(cmpTempRegister, right, left);
-            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
-        }
-        if (cond == AboveOrEqual) {
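// [Editor's note] sltu writes 1 to cmpTemp iff left < right as unsigned
// values, so branching while cmpTemp == 0 realizes unsigned >= (AboveOrEqual).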
m_assembler.sltu(cmpTempRegister, left, right); - return branchEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == Below) { - m_assembler.sltu(cmpTempRegister, left, right); - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == BelowOrEqual) { - m_assembler.sltu(cmpTempRegister, right, left); - return branchEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == GreaterThan) { - m_assembler.slt(cmpTempRegister, right, left); - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == GreaterThanOrEqual) { - m_assembler.slt(cmpTempRegister, left, right); - return branchEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == LessThan) { - m_assembler.slt(cmpTempRegister, left, right); - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == LessThanOrEqual) { - m_assembler.slt(cmpTempRegister, right, left); - return branchEqual(cmpTempRegister, MIPSRegisters::zero); - } - ASSERT(0); - - return Jump(); - } - - Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right) - { - if (!m_fixedWidth) { - if (!right.m_value) - return branch32(cond, left, MIPSRegisters::zero); - if (right.m_value >= -32768 && right.m_value <= 32767) { - if (cond == AboveOrEqual) { - m_assembler.sltiu(cmpTempRegister, left, right.m_value); - return branchEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == Below) { - m_assembler.sltiu(cmpTempRegister, left, right.m_value); - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == GreaterThanOrEqual) { - m_assembler.slti(cmpTempRegister, left, right.m_value); - return branchEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == LessThan) { - m_assembler.slti(cmpTempRegister, left, right.m_value); - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - } - } - } - move(right, immTempRegister); - return branch32(cond, left, immTempRegister); - } - - Jump branch32(RelationalCondition cond, RegisterID left, Address right) - { - load32(right, dataTempRegister); - return branch32(cond, left, dataTempRegister); - } - - Jump branch32(RelationalCondition cond, Address left, RegisterID right) - { - load32(left, dataTempRegister); - return branch32(cond, dataTempRegister, right); - } - - Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right) - { - load32(left, dataTempRegister); - return branch32(cond, dataTempRegister, right); - } - - Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right) - { - load32(left, dataTempRegister); - return branch32(cond, dataTempRegister, right); - } - - Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right) - { - load32WithUnalignedHalfWords(left, dataTempRegister); - return branch32(cond, dataTempRegister, right); - } - - Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right) - { - load32(left.m_ptr, dataTempRegister); - return branch32(cond, dataTempRegister, right); - } - - Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) - { - load32(left.m_ptr, dataTempRegister); - return branch32(cond, dataTempRegister, right); - } - - Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask) - { - ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed)); - m_assembler.andInsn(cmpTempRegister, reg, mask); - switch (cond) { - case Zero: - return branchEqual(cmpTempRegister, MIPSRegisters::zero); - case NonZero: - return 
branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - case Signed: - m_assembler.slt(cmpTempRegister, cmpTempRegister, MIPSRegisters::zero); - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - default: - RELEASE_ASSERT_NOT_REACHED(); - } - } - - Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) - { - ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed)); - if (!m_fixedWidth) { - if (mask.m_value == -1) { - switch (cond) { - case Zero: - return branchEqual(reg, MIPSRegisters::zero); - case NonZero: - return branchNotEqual(reg, MIPSRegisters::zero); - case Signed: - m_assembler.slt(cmpTempRegister, reg, MIPSRegisters::zero); - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - default: - RELEASE_ASSERT_NOT_REACHED(); - } - } -#if WTF_MIPS_ISA_REV(2) - if (isPowerOf2(mask.m_value)) { - uint16_t pos= bitPosition(mask.m_value); - m_assembler.ext(cmpTempRegister, reg, pos, 1); - switch (cond) { - case Zero: - return branchEqual(cmpTempRegister, MIPSRegisters::zero); - case NonZero: - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - case Signed: - m_assembler.slt(cmpTempRegister, cmpTempRegister, MIPSRegisters::zero); - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - default: - RELEASE_ASSERT_NOT_REACHED(); - } - } -#endif - if (mask.m_value >= 0 && mask.m_value <= 65535) { - m_assembler.andi(cmpTempRegister, reg, mask.m_value); - switch (cond) { - case Zero: - return branchEqual(cmpTempRegister, MIPSRegisters::zero); - case NonZero: - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - case Signed: - m_assembler.slt(cmpTempRegister, cmpTempRegister, MIPSRegisters::zero); - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - default: - RELEASE_ASSERT_NOT_REACHED(); - } - } - } - move(mask, immTempRegister); - return branchTest32(cond, reg, immTempRegister); - } - - Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) - { - load32(address, dataTempRegister); - return branchTest32(cond, dataTempRegister, mask); - } - - Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) - { - load32(address, dataTempRegister); - return branchTest32(cond, dataTempRegister, mask); - } - - Jump branchTest32(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) - { - load32(address.m_ptr, dataTempRegister); - return branchTest32(cond, dataTempRegister, mask); - } - - TrustedImm32 mask8OnTest(ResultCondition cond, TrustedImm32 mask) - { - if (mask.m_value == -1 && !m_fixedWidth) - return TrustedImm32(-1); - return MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask); - } - - Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) - { - TrustedImm32 mask8 = mask8OnTest(cond, mask); - MacroAssemblerHelpers::load8OnCondition(*this, cond, address, dataTempRegister); - return branchTest32(cond, dataTempRegister, mask8); - } - - Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) - { - TrustedImm32 mask8 = mask8OnTest(cond, mask); - MacroAssemblerHelpers::load8OnCondition(*this, cond, address, dataTempRegister); - return branchTest32(cond, dataTempRegister, mask8); - } - - Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) - { - TrustedImm32 mask8 = mask8OnTest(cond, mask); - MacroAssemblerHelpers::load8OnCondition(*this, cond, address, 
dataTempRegister); - return branchTest32(cond, dataTempRegister, mask8); - } - - TrustedImm32 mask16OnTest(ResultCondition cond, TrustedImm32 mask) - { - if (mask.m_value == -1 && !m_fixedWidth) - return TrustedImm32(-1); - return MacroAssemblerHelpers::mask16OnCondition(*this, cond, mask); - } - - Jump branchTest16(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) - { - TrustedImm32 mask16 = mask16OnTest(cond, mask); - MacroAssemblerHelpers::load16OnCondition(*this, cond, address, dataTempRegister); - return branchTest32(cond, dataTempRegister, mask16); - } - - Jump branchTest16(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) - { - TrustedImm32 mask16 = mask16OnTest(cond, mask); - MacroAssemblerHelpers::load16OnCondition(*this, cond, address, dataTempRegister); - return branchTest32(cond, dataTempRegister, mask16); - } - - Jump branchTest16(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) - { - TrustedImm32 mask16 = mask16OnTest(cond, mask); - MacroAssemblerHelpers::load16OnCondition(*this, cond, address, dataTempRegister); - return branchTest32(cond, dataTempRegister, mask16); - } - - Jump jump() - { - return branchEqual(MIPSRegisters::zero, MIPSRegisters::zero); - } - - void farJump(RegisterID target, PtrTag) - { - move(target, MIPSRegisters::t9); - m_assembler.jr(MIPSRegisters::t9); - m_assembler.nop(); - } - - void farJump(TrustedImmPtr target, PtrTag) - { - move(target, MIPSRegisters::t9); - m_assembler.jr(MIPSRegisters::t9); - m_assembler.nop(); - } - - void farJump(Address address, PtrTag) - { - m_fixedWidth = true; - load32(address, MIPSRegisters::t9); - m_assembler.jr(MIPSRegisters::t9); - m_assembler.nop(); - m_fixedWidth = false; - } - - void farJump(AbsoluteAddress address, PtrTag) - { - m_fixedWidth = true; - load32(address.m_ptr, MIPSRegisters::t9); - m_assembler.jr(MIPSRegisters::t9); - m_assembler.nop(); - m_fixedWidth = false; - } - - ALWAYS_INLINE void farJump(RegisterID target, RegisterID jumpTag) { UNUSED_PARAM(jumpTag), farJump(target, NoPtrTag); } - ALWAYS_INLINE void farJump(Address address, RegisterID jumpTag) { UNUSED_PARAM(jumpTag), farJump(address, NoPtrTag); } - ALWAYS_INLINE void farJump(AbsoluteAddress address, RegisterID jumpTag) { UNUSED_PARAM(jumpTag), farJump(address, NoPtrTag); } - - void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2) - { - m_assembler.vmov(dest1, dest2, src); - } - - void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest) - { - m_assembler.vmov(dest, src1, src2); - } - - // Arithmetic control flow operations: - // - // This set of conditional branch operations branch based - // on the result of an arithmetic operation. The operation - // is performed as normal, storing the result. - // - // * jz operations branch if the result is zero. - // * jo operations branch if the (signed) arithmetic - // operation caused an overflow to occur. 
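// [Editor's note] Illustrative sketch (not from the original file) of the
// sign-bit overflow test the branchAdd32 Overflow paths below encode with
// xor/bltz/bgez: signed addition can only overflow when both operands have
// the same sign and the sum's sign differs from it.
// --- illustrative sketch begin ---
#include <cstdint>

inline bool addWouldOverflow(int32_t a, int32_t b)
{
    // Do the addition in unsigned arithmetic so the wraparound itself is
    // well defined; the cast back assumes the usual two's-complement mapping.
    int32_t sum = static_cast<int32_t>(static_cast<uint32_t>(a) + static_cast<uint32_t>(b));
    // (a ^ b) < 0: operands differ in sign -> overflow impossible (the bltz early-out).
    // (a ^ sum) >= 0: sum kept the operands' sign -> no overflow (the bgez early-out).
    return (a ^ b) >= 0 && (a ^ sum) < 0;
}

// addWouldOverflow(INT32_MAX, 1) == true; addWouldOverflow(-1, 1) == false.
// --- illustrative sketch end ---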
-
-    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
-    {
-        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
-        if (cond == Overflow) {
-            /*
-                move dest, dataTemp
-                xor  cmpTemp, dataTemp, src
-                bltz cmpTemp, No_overflow    # diff sign bit -> no overflow
-                addu dest, dataTemp, src
-                xor  cmpTemp, dest, dataTemp
-                bgez cmpTemp, No_overflow    # same sign bit -> no overflow
-                nop
-                b    Overflow
-                nop
-                b    No_overflow
-                nop
-                nop
-                nop
-            No_overflow:
-            */
-            move(dest, dataTempRegister);
-            m_assembler.xorInsn(cmpTempRegister, dataTempRegister, src);
-            m_assembler.bltz(cmpTempRegister, 10);
-            m_assembler.addu(dest, dataTempRegister, src);
-            m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister);
-            m_assembler.bgez(cmpTempRegister, 7);
-            m_assembler.nop();
-            return jump();
-        }
-        if (cond == Signed) {
-            add32(src, dest);
-            // Check if dest is negative.
-            m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
-            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
-        }
-        if (cond == PositiveOrZero) {
-            add32(src, dest);
-            // Check if dest is not negative.
-            m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
-            return branchEqual(cmpTempRegister, MIPSRegisters::zero);
-        }
-        if (cond == Zero) {
-            add32(src, dest);
-            return branchEqual(dest, MIPSRegisters::zero);
-        }
-        if (cond == NonZero) {
-            add32(src, dest);
-            return branchNotEqual(dest, MIPSRegisters::zero);
-        }
-        ASSERT(0);
-        return Jump();
-    }
-
-    Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
-    {
-        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
-        if (cond == Overflow) {
-            /*
-                move dataTemp, op1
-                xor  cmpTemp, dataTemp, op2
-                bltz cmpTemp, No_overflow    # diff sign bit -> no overflow
-                addu dest, dataTemp, op2
-                xor  cmpTemp, dest, dataTemp
-                bgez cmpTemp, No_overflow    # same sign bit -> no overflow
-                nop
-                b    Overflow
-                nop
-                b    No_overflow
-                nop
-                nop
-                nop
-            No_overflow:
-            */
-            move(op1, dataTempRegister);
-            m_assembler.xorInsn(cmpTempRegister, dataTempRegister, op2);
-            m_assembler.bltz(cmpTempRegister, 10);
-            m_assembler.addu(dest, dataTempRegister, op2);
-            m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister);
-            m_assembler.bgez(cmpTempRegister, 7);
-            m_assembler.nop();
-            return jump();
-        }
-        if (cond == Signed) {
-            add32(op1, op2, dest);
-            // Check if dest is negative.
-            m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
-            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
-        }
-        if (cond == PositiveOrZero) {
-            add32(op1, op2, dest);
-            // Check if dest is not negative.
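// [Editor's note] slt sets cmpTemp to 1 iff dest < 0; branching while
// cmpTemp == 0 therefore takes the branch when dest >= 0 (PositiveOrZero).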
- m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero); - return branchEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == Zero) { - add32(op1, op2, dest); - return branchEqual(dest, MIPSRegisters::zero); - } - if (cond == NonZero) { - add32(op1, op2, dest); - return branchNotEqual(dest, MIPSRegisters::zero); - } - ASSERT(0); - return Jump(); - } - - Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest) - { - return branchAdd32(cond, dest, imm, dest); - } - - Jump branchAdd32(ResultCondition cond, Address address, RegisterID dest) - { - load32(address, immTempRegister); - return branchAdd32(cond, immTempRegister, dest); - } - - Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) - { - if (imm.m_value >= -32768 && imm.m_value <= 32767 && !m_fixedWidth) { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero)); - if (cond == Overflow) { - if (imm.m_value >= 0) { - m_assembler.bltz(src, 9); - m_assembler.addiu(dest, src, imm.m_value); - m_assembler.bgez(dest, 7); - m_assembler.nop(); - } else { - m_assembler.bgez(src, 9); - m_assembler.addiu(dest, src, imm.m_value); - m_assembler.bltz(dest, 7); - m_assembler.nop(); - } - return jump(); - } - m_assembler.addiu(dest, src, imm.m_value); - if (cond == Signed) { - // Check if dest is negative. - m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero); - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == PositiveOrZero) { - // Check if dest is not negative. - m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero); - return branchEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == Zero) - return branchEqual(dest, MIPSRegisters::zero); - if (cond == NonZero) - return branchNotEqual(dest, MIPSRegisters::zero); - ASSERT_NOT_REACHED(); - return Jump(); - } - move(imm, immTempRegister); - return branchAdd32(cond, src, immTempRegister, dest); - } - - Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest) - { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero)); - if (cond == Overflow) { - if (m_fixedWidth) { - /* - load dest, dataTemp - move imm, immTemp - xor cmpTemp, dataTemp, immTemp - addu dataTemp, dataTemp, immTemp - store dataTemp, dest - bltz cmpTemp, No_overflow # diff sign bit -> no overflow - xor cmpTemp, dataTemp, immTemp - bgez cmpTemp, No_overflow # same sign big -> no overflow - nop - b Overflow - nop - b No_overflow - nop - nop - nop - No_overflow: - */ - load32(dest.m_ptr, dataTempRegister); - move(imm, immTempRegister); - m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister); - m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister); - store32(dataTempRegister, dest.m_ptr); - m_assembler.bltz(cmpTempRegister, 9); - m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister); - m_assembler.bgez(cmpTempRegister, 7); - m_assembler.nop(); - } else { - uintptr_t adr = reinterpret_cast(dest.m_ptr); - m_assembler.lui(addrTempRegister, (adr + 0x8000) >> 16); - m_assembler.lw(dataTempRegister, addrTempRegister, adr & 0xffff); - if (imm.m_value >= 0 && imm.m_value <= 32767) { - move(dataTempRegister, cmpTempRegister); - m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value); - m_assembler.bltz(cmpTempRegister, 9); - m_assembler.sw(dataTempRegister, addrTempRegister, adr & 0xffff); - m_assembler.bgez(dataTempRegister, 7); - 
m_assembler.nop(); - } else if (imm.m_value >= -32768 && imm.m_value < 0) { - move(dataTempRegister, cmpTempRegister); - m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value); - m_assembler.bgez(cmpTempRegister, 9); - m_assembler.sw(dataTempRegister, addrTempRegister, adr & 0xffff); - m_assembler.bltz(cmpTempRegister, 7); - m_assembler.nop(); - } else { - move(imm, immTempRegister); - m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister); - m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister); - m_assembler.bltz(cmpTempRegister, 10); - m_assembler.sw(dataTempRegister, addrTempRegister, adr & 0xffff); - m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister); - m_assembler.bgez(cmpTempRegister, 7); - m_assembler.nop(); - } - } - return jump(); - } - if (m_fixedWidth) { - move(imm, immTempRegister); - load32(dest.m_ptr, dataTempRegister); - add32(immTempRegister, dataTempRegister); - store32(dataTempRegister, dest.m_ptr); - } else { - uintptr_t adr = reinterpret_cast(dest.m_ptr); - m_assembler.lui(addrTempRegister, (adr + 0x8000) >> 16); - m_assembler.lw(dataTempRegister, addrTempRegister, adr & 0xffff); - add32(imm, dataTempRegister); - m_assembler.sw(dataTempRegister, addrTempRegister, adr & 0xffff); - } - if (cond == Signed) { - // Check if dest is negative. - m_assembler.slt(cmpTempRegister, dataTempRegister, MIPSRegisters::zero); - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == PositiveOrZero) { - // Check if dest is not negative. - m_assembler.slt(cmpTempRegister, dataTempRegister, MIPSRegisters::zero); - return branchEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == Zero) - return branchEqual(dataTempRegister, MIPSRegisters::zero); - if (cond == NonZero) - return branchNotEqual(dataTempRegister, MIPSRegisters::zero); - ASSERT(0); - return Jump(); - } - - Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, Address dest) - { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero)); - if (cond == Overflow) { - if (m_fixedWidth) { - /* - load dest, dataTemp - move imm, immTemp - xor cmpTemp, dataTemp, immTemp - addu dataTemp, dataTemp, immTemp - store dataTemp, dest - bltz cmpTemp, No_overflow # diff sign bit -> no overflow - xor cmpTemp, dataTemp, immTemp - bgez cmpTemp, No_overflow # same sign big -> no overflow - nop - b Overflow - nop - b No_overflow - nop - nop - nop - No_overflow: - */ - load32(dest, dataTempRegister); - move(imm, immTempRegister); - m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister); - m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister); - store32(dataTempRegister, dest); - m_assembler.bltz(cmpTempRegister, 9); - m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister); - m_assembler.bgez(cmpTempRegister, 7); - m_assembler.nop(); - } else { - m_assembler.lw(dataTempRegister, dest.base, dest.offset); - if (imm.m_value >= 0 && imm.m_value <= 32767) { - move(dataTempRegister, cmpTempRegister); - m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value); - m_assembler.bltz(cmpTempRegister, 9); - m_assembler.sw(dataTempRegister, dest.base, dest.offset); - m_assembler.bgez(dataTempRegister, 7); - m_assembler.nop(); - } else if (imm.m_value >= -32768 && imm.m_value < 0) { - move(dataTempRegister, cmpTempRegister); - m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value); - m_assembler.bgez(cmpTempRegister, 9); - 
m_assembler.sw(dataTempRegister, dest.base, dest.offset); - m_assembler.bltz(cmpTempRegister, 7); - m_assembler.nop(); - } else { - move(imm, immTempRegister); - m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister); - m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister); - m_assembler.bltz(cmpTempRegister, 10); - m_assembler.sw(dataTempRegister, dest.base, dest.offset); - m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister); - m_assembler.bgez(cmpTempRegister, 7); - m_assembler.nop(); - } - } - return jump(); - } - if (m_fixedWidth) { - move(imm, immTempRegister); - load32(dest, dataTempRegister); - add32(immTempRegister, dataTempRegister); - store32(dataTempRegister, dest); - } else { - m_assembler.lw(dataTempRegister, dest.base, dest.offset); - add32(imm, dataTempRegister); - m_assembler.sw(dataTempRegister, dest.base, dest.offset); - } - if (cond == Signed) { - // Check if dest is negative. - m_assembler.slt(cmpTempRegister, dataTempRegister, MIPSRegisters::zero); - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == PositiveOrZero) { - // Check if dest is not negative. - m_assembler.slt(cmpTempRegister, dataTempRegister, MIPSRegisters::zero); - return branchEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == Zero) - return branchEqual(dataTempRegister, MIPSRegisters::zero); - if (cond == NonZero) - return branchNotEqual(dataTempRegister, MIPSRegisters::zero); - ASSERT(0); - return Jump(); - } - - Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) - { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); - if (cond == Overflow) { - /* - mult src, dest - mfhi dataTemp - mflo dest - sra addrTemp, dest, 31 - beq dataTemp, addrTemp, No_overflow # all sign bits (bit 63 to bit 31) are the same -> no overflow - nop - b Overflow - nop - b No_overflow - nop - nop - nop - No_overflow: - */ - m_assembler.mult(src1, src2); - m_assembler.mfhi(dataTempRegister); - m_assembler.mflo(dest); - m_assembler.sra(addrTempRegister, dest, 31); - m_assembler.beq(dataTempRegister, addrTempRegister, 7); - m_assembler.nop(); - return jump(); - } - if (cond == Signed) { - mul32(src1, src2, dest); - // Check if dest is negative. - m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero); - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == Zero) { - mul32(src1, src2, dest); - return branchEqual(dest, MIPSRegisters::zero); - } - if (cond == NonZero) { - mul32(src1, src2, dest); - return branchNotEqual(dest, MIPSRegisters::zero); - } - ASSERT(0); - return Jump(); - } - - Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest) - { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); - if (cond == Overflow) { - /* - mult src, dest - mfhi dataTemp - mflo dest - sra addrTemp, dest, 31 - beq dataTemp, addrTemp, No_overflow # all sign bits (bit 63 to bit 31) are the same -> no overflow - nop - b Overflow - nop - b No_overflow - nop - nop - nop - No_overflow: - */ - m_assembler.mult(src, dest); - m_assembler.mfhi(dataTempRegister); - m_assembler.mflo(dest); - m_assembler.sra(addrTempRegister, dest, 31); - m_assembler.beq(dataTempRegister, addrTempRegister, 7); - m_assembler.nop(); - return jump(); - } - if (cond == Signed) { - mul32(src, dest); - // Check if dest is negative. 
- m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero); - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == Zero) { - mul32(src, dest); - return branchEqual(dest, MIPSRegisters::zero); - } - if (cond == NonZero) { - mul32(src, dest); - return branchNotEqual(dest, MIPSRegisters::zero); - } - ASSERT(0); - return Jump(); - } - - Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) - { - move(imm, immTempRegister); - return branchMul32(cond, immTempRegister, src, dest); - } - - Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest) - { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); - if (cond == Overflow) { - /* - move dest, dataTemp - xor cmpTemp, dataTemp, src - bgez cmpTemp, No_overflow # same sign bit -> no overflow - subu dest, dataTemp, src - xor cmpTemp, dest, dataTemp - bgez cmpTemp, No_overflow # same sign bit -> no overflow - nop - b Overflow - nop - b No_overflow - nop - nop - nop - No_overflow: - */ - move(dest, dataTempRegister); - m_assembler.xorInsn(cmpTempRegister, dataTempRegister, src); - m_assembler.bgez(cmpTempRegister, 10); - m_assembler.subu(dest, dataTempRegister, src); - m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister); - m_assembler.bgez(cmpTempRegister, 7); - m_assembler.nop(); - return jump(); - } - if (cond == Signed) { - sub32(src, dest); - // Check if dest is negative. - m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero); - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == Zero) { - sub32(src, dest); - return branchEqual(dest, MIPSRegisters::zero); - } - if (cond == NonZero) { - sub32(src, dest); - return branchNotEqual(dest, MIPSRegisters::zero); - } - ASSERT(0); - return Jump(); - } - - Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest) - { - move(imm, immTempRegister); - return branchSub32(cond, immTempRegister, dest); - } - - Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) - { - move(imm, immTempRegister); - return branchSub32(cond, src, immTempRegister, dest); - } - - Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest) - { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); - if (cond == Overflow) { - /* - move dataTemp, op1 - xor cmpTemp, dataTemp, op2 - bgez cmpTemp, No_overflow # same sign bit -> no overflow - subu dest, dataTemp, op2 - xor cmpTemp, dest, dataTemp - bgez cmpTemp, No_overflow # same sign bit -> no overflow - nop - b Overflow - nop - b No_overflow - nop - nop - nop - No_overflow: - */ - move(op1, dataTempRegister); - m_assembler.xorInsn(cmpTempRegister, dataTempRegister, op2); - m_assembler.bgez(cmpTempRegister, 10); - m_assembler.subu(dest, dataTempRegister, op2); - m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister); - m_assembler.bgez(cmpTempRegister, 7); - m_assembler.nop(); - return jump(); - } - if (cond == Signed) { - sub32(op1, op2, dest); - // Check if dest is negative. 
- m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero); - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == Zero) { - sub32(op1, op2, dest); - return branchEqual(dest, MIPSRegisters::zero); - } - if (cond == NonZero) { - sub32(op1, op2, dest); - return branchNotEqual(dest, MIPSRegisters::zero); - } - ASSERT(0); - return Jump(); - } - - Jump branchNeg32(ResultCondition cond, RegisterID srcDest) - { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); - if (cond == Overflow) { - /* - bgez srcDest, No_overflow # positive input -> no overflow - subu srcDest, zero, srcDest - bgez srcDest, No_overflow # negative input, positive output -> no overflow - nop - b Overflow - nop - b No_overflow - nop - nop - nop - No_overflow: - */ - m_assembler.bgez(srcDest, 9); - m_assembler.subu(srcDest, MIPSRegisters::zero, srcDest); - m_assembler.bgez(srcDest, 7); - m_assembler.nop(); - return jump(); - } - if (cond == Signed) { - m_assembler.subu(srcDest, MIPSRegisters::zero, srcDest); - // Check if dest is negative. - m_assembler.slt(cmpTempRegister, srcDest, MIPSRegisters::zero); - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == Zero) { - m_assembler.subu(srcDest, MIPSRegisters::zero, srcDest); - return branchEqual(srcDest, MIPSRegisters::zero); - } - if (cond == NonZero) { - m_assembler.subu(srcDest, MIPSRegisters::zero, srcDest); - return branchNotEqual(srcDest, MIPSRegisters::zero); - } - ASSERT_NOT_REACHED(); - return Jump(); - } - - Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest) - { - ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero)); - if (cond == Signed) { - or32(src, dest); - // Check if dest is negative. - m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero); - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); - } - if (cond == Zero) { - or32(src, dest); - return branchEqual(dest, MIPSRegisters::zero); - } - if (cond == NonZero) { - or32(src, dest); - return branchNotEqual(dest, MIPSRegisters::zero); - } - ASSERT(0); - return Jump(); - } - - // Miscellaneous operations: - - void breakpoint() - { - m_assembler.bkpt(); - } - - static bool isBreakpoint(void* address) { return MIPSAssembler::isBkpt(address); } - - Call nearCall() - { - /* We need two words for relaxation. 
*/ - m_assembler.nop(); - m_assembler.nop(); - m_assembler.jal(); - m_assembler.nop(); - return Call(m_assembler.label(), Call::LinkableNear); - } - - Call nearTailCall() - { - m_assembler.nop(); - m_assembler.nop(); - m_assembler.beq(MIPSRegisters::zero, MIPSRegisters::zero, 0); - m_assembler.nop(); - insertRelaxationWords(); - return Call(m_assembler.label(), Call::LinkableNearTail); - } - - Call call(PtrTag) - { - m_assembler.lui(MIPSRegisters::t9, 0); - m_assembler.ori(MIPSRegisters::t9, MIPSRegisters::t9, 0); - m_assembler.jalr(MIPSRegisters::t9); - m_assembler.nop(); - return Call(m_assembler.label(), Call::Linkable); - } - - Call call(RegisterID target, PtrTag) - { - move(target, MIPSRegisters::t9); - m_assembler.jalr(MIPSRegisters::t9); - m_assembler.nop(); - return Call(m_assembler.label(), Call::None); - } - - Call call(Address address, PtrTag) - { - m_fixedWidth = true; - load32(address, MIPSRegisters::t9); - m_assembler.jalr(MIPSRegisters::t9); - m_assembler.nop(); - m_fixedWidth = false; - return Call(m_assembler.label(), Call::None); - } - - ALWAYS_INLINE Call call(RegisterID callTag) { return UNUSED_PARAM(callTag), call(NoPtrTag); } - ALWAYS_INLINE Call call(RegisterID target, RegisterID callTag) { return UNUSED_PARAM(callTag), call(target, NoPtrTag); } - ALWAYS_INLINE Call call(Address address, RegisterID callTag) { return UNUSED_PARAM(callTag), call(address, NoPtrTag); } - - void ret() - { - m_assembler.jr(MIPSRegisters::ra); - m_assembler.nop(); - } - - void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest) - { - if (cond == Equal) { - if (right == MIPSRegisters::zero && !m_fixedWidth) - m_assembler.sltiu(dest, left, 1); - else { - m_assembler.xorInsn(dest, left, right); - m_assembler.sltiu(dest, dest, 1); - } - } else if (cond == NotEqual) { - if (right == MIPSRegisters::zero && !m_fixedWidth) - m_assembler.sltu(dest, MIPSRegisters::zero, left); - else { - m_assembler.xorInsn(dest, left, right); - m_assembler.sltu(dest, MIPSRegisters::zero, dest); - } - } else if (cond == Above) - m_assembler.sltu(dest, right, left); - else if (cond == AboveOrEqual) { - m_assembler.sltu(dest, left, right); - m_assembler.xori(dest, dest, 1); - } else if (cond == Below) - m_assembler.sltu(dest, left, right); - else if (cond == BelowOrEqual) { - m_assembler.sltu(dest, right, left); - m_assembler.xori(dest, dest, 1); - } else if (cond == GreaterThan) - m_assembler.slt(dest, right, left); - else if (cond == GreaterThanOrEqual) { - m_assembler.slt(dest, left, right); - m_assembler.xori(dest, dest, 1); - } else if (cond == LessThan) - m_assembler.slt(dest, left, right); - else if (cond == LessThanOrEqual) { - m_assembler.slt(dest, right, left); - m_assembler.xori(dest, dest, 1); - } - } - - void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest) - { - if (!right.m_value && !m_fixedWidth) - compare32(cond, left, MIPSRegisters::zero, dest); - else { - move(right, immTempRegister); - compare32(cond, left, immTempRegister, dest); - } - } - - void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest) - { - ASSERT((cond == Zero) || (cond == NonZero)); - TrustedImm32 mask8 = mask8OnTest(cond, mask); - MacroAssemblerHelpers::load8OnCondition(*this, cond, address, dataTempRegister); - if ((mask8.m_value & 0xff) == 0xff && !m_fixedWidth) { - if (cond == Zero) - m_assembler.sltiu(dest, dataTempRegister, 1); - else - m_assembler.sltu(dest, MIPSRegisters::zero, dataTempRegister); - } else { - move(mask8, 
immTempRegister); - m_assembler.andInsn(cmpTempRegister, dataTempRegister, immTempRegister); - if (cond == Zero) - m_assembler.sltiu(dest, cmpTempRegister, 1); - else - m_assembler.sltu(dest, MIPSRegisters::zero, cmpTempRegister); - } - } - - void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest) - { - ASSERT((cond == Zero) || (cond == NonZero)); - load32(address, dataTempRegister); - if (mask.m_value == -1 && !m_fixedWidth) { - if (cond == Zero) - m_assembler.sltiu(dest, dataTempRegister, 1); - else - m_assembler.sltu(dest, MIPSRegisters::zero, dataTempRegister); - } else { - move(mask, immTempRegister); - m_assembler.andInsn(cmpTempRegister, dataTempRegister, immTempRegister); - if (cond == Zero) - m_assembler.sltiu(dest, cmpTempRegister, 1); - else - m_assembler.sltu(dest, MIPSRegisters::zero, cmpTempRegister); - } - } - - DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest) - { - m_fixedWidth = true; - DataLabel32 label(this); - move(imm, dest); - m_fixedWidth = false; - return label; - } - - DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest) - { - m_fixedWidth = true; - DataLabelPtr label(this); - move(initialValue, dest); - m_fixedWidth = false; - return label; - } - - Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(nullptr)) - { - m_fixedWidth = true; - dataLabel = moveWithPatch(initialRightValue, immTempRegister); - m_assembler.nop(); - m_assembler.nop(); - Jump temp = branch32(cond, left, immTempRegister); - m_fixedWidth = false; - return temp; - } - - Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(nullptr)) - { - m_fixedWidth = true; - load32(left, dataTempRegister); - dataLabel = moveWithPatch(initialRightValue, immTempRegister); - m_assembler.nop(); - m_assembler.nop(); - Jump temp = branch32(cond, dataTempRegister, immTempRegister); - m_fixedWidth = false; - return temp; - } - - Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) - { - m_fixedWidth = true; - load32(left, dataTempRegister); - dataLabel = moveWithPatch(initialRightValue, immTempRegister); - Jump temp = branch32(cond, dataTempRegister, immTempRegister); - m_fixedWidth = false; - return temp; - } - - DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, Address address) - { - m_fixedWidth = true; - DataLabelPtr dataLabel = moveWithPatch(initialValue, dataTempRegister); - store32(dataTempRegister, address); - m_fixedWidth = false; - return dataLabel; - } - - DataLabelPtr storePtrWithPatch(Address address) - { - return storePtrWithPatch(TrustedImmPtr(nullptr), address); - } - - void loadFloat(BaseIndex address, FPRegisterID dest) - { - if (!m_fixedWidth) { - loadAddress(address, LoadAddressMode::ScaleAndAddOffsetIfOffsetIsOutOfBounds); - m_assembler.lwc1(dest, addrTempRegister, address.offset); - } else { - /* - sll addrTemp, address.index, address.scale - addu addrTemp, addrTemp, address.base - lui immTemp, (address.offset + 0x8000) >> 16 - addu addrTemp, addrTemp, immTemp - lwc1 dest, (address.offset & 0xffff)(at) - */ - m_assembler.sll(addrTempRegister, address.index, address.scale); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, 
immTempRegister); - m_assembler.lwc1(dest, addrTempRegister, address.offset); - } - } - - void loadFloat(Address address, FPRegisterID dest) - { - if (address.offset >= -32768 && address.offset <= 32767 - && !m_fixedWidth) { - m_assembler.lwc1(dest, address.base, address.offset); - } else { - /* - lui addrTemp, (offset + 0x8000) >> 16 - addu addrTemp, addrTemp, base - lwc1 dest, (offset & 0xffff)(addrTemp) - */ - m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lwc1(dest, addrTempRegister, address.offset); - } - } - - void loadDouble(Address address, FPRegisterID dest) - { -#if WTF_MIPS_ISA(1) - /* - li addrTemp, address.offset - addu addrTemp, addrTemp, base - lwc1 dest, 0(addrTemp) - lwc1 dest+1, 4(addrTemp) - */ - move(TrustedImm32(address.offset), addrTempRegister); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lwc1(dest, addrTempRegister, 0); - m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, 4); -#else - if (address.offset >= -32768 && address.offset <= 32767 - && !m_fixedWidth) { - m_assembler.ldc1(dest, address.base, address.offset); - } else { - /* - lui addrTemp, (offset + 0x8000) >> 16 - addu addrTemp, addrTemp, base - ldc1 dest, (offset & 0xffff)(addrTemp) - */ - m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.ldc1(dest, addrTempRegister, address.offset); - } -#endif - } - - void loadDouble(BaseIndex address, FPRegisterID dest) - { -#if WTF_MIPS_ISA(1) - if (!m_fixedWidth) { - loadAddress(address, LoadAddressMode::ScaleAndAddOffsetIfOffsetIsOutOfBounds); - m_assembler.lwc1(dest, addrTempRegister, address.offset); - m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, address.offset + 4); - } else { - /* - sll addrTemp, address.index, address.scale - addu addrTemp, addrTemp, address.base - lui immTemp, (address.offset + 0x8000) >> 16 - addu addrTemp, addrTemp, immTemp - lwc1 dest, (address.offset & 0xffff)(at) - lwc1 dest+1, (address.offset & 0xffff + 4)(at) - */ - m_assembler.sll(addrTempRegister, address.index, address.scale); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister); - m_assembler.lwc1(dest, addrTempRegister, address.offset); - m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, address.offset + 4); - } -#else - if (!m_fixedWidth) { - loadAddress(address, LoadAddressMode::ScaleAndAddOffsetIfOffsetIsOutOfBounds); - m_assembler.ldc1(dest, addrTempRegister, address.offset); - } else { - /* - sll addrTemp, address.index, address.scale - addu addrTemp, addrTemp, address.base - lui immTemp, (address.offset + 0x8000) >> 16 - addu addrTemp, addrTemp, immTemp - ldc1 dest, (address.offset & 0xffff)(at) - */ - m_assembler.sll(addrTempRegister, address.index, address.scale); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister); - m_assembler.ldc1(dest, addrTempRegister, address.offset); - } -#endif - } - - void loadDouble(TrustedImmPtr address, FPRegisterID dest) - { -#if WTF_MIPS_ISA(1) - /* - li addrTemp, address - lwc1 dest, 0(addrTemp) - lwc1 dest+1, 4(addrTemp) - */ - move(address, 
addrTempRegister); - m_assembler.lwc1(dest, addrTempRegister, 0); - m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, 4); -#else - if (m_fixedWidth) { - /* - li addrTemp, address - ldc1 dest, 0(addrTemp) - */ - move(TrustedImmPtr(address), addrTempRegister); - m_assembler.ldc1(dest, addrTempRegister, 0); - } else { - uintptr_t adr = reinterpret_cast(address.m_value); - m_assembler.lui(addrTempRegister, (adr + 0x8000) >> 16); - m_assembler.ldc1(dest, addrTempRegister, adr & 0xffff); - } -#endif - } - - void storeFloat(FPRegisterID src, BaseIndex address) - { - if (!m_fixedWidth) { - loadAddress(address, LoadAddressMode::ScaleAndAddOffsetIfOffsetIsOutOfBounds); - m_assembler.swc1(src, addrTempRegister, address.offset); - } else { - /* - sll addrTemp, address.index, address.scale - addu addrTemp, addrTemp, address.base - lui immTemp, (address.offset + 0x8000) >> 16 - addu addrTemp, addrTemp, immTemp - swc1 src, (address.offset & 0xffff)(at) - */ - m_assembler.sll(addrTempRegister, address.index, address.scale); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister); - m_assembler.swc1(src, addrTempRegister, address.offset); - } - } - - void storeFloat(FPRegisterID src, Address address) - { - if (address.offset >= -32768 && address.offset <= 32767 - && !m_fixedWidth) - m_assembler.swc1(src, address.base, address.offset); - else { - /* - lui addrTemp, (offset + 0x8000) >> 16 - addu addrTemp, addrTemp, base - swc1 src, (offset & 0xffff)(addrTemp) - */ - m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.swc1(src, addrTempRegister, address.offset); - } - } - - void storeDouble(FPRegisterID src, Address address) - { -#if WTF_MIPS_ISA(1) - /* - li addrTemp, address.offset - addu addrTemp, addrTemp, base - swc1 dest, 0(addrTemp) - swc1 dest+1, 4(addrTemp) - */ - move(TrustedImm32(address.offset), addrTempRegister); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.swc1(src, addrTempRegister, 0); - m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, 4); -#else - if (address.offset >= -32768 && address.offset <= 32767 - && !m_fixedWidth) - m_assembler.sdc1(src, address.base, address.offset); - else { - /* - lui addrTemp, (offset + 0x8000) >> 16 - addu addrTemp, addrTemp, base - sdc1 src, (offset & 0xffff)(addrTemp) - */ - m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.sdc1(src, addrTempRegister, address.offset); - } -#endif - } - - void storeDouble(FPRegisterID src, BaseIndex address) - { -#if WTF_MIPS_ISA(1) - if (!m_fixedWidth) { - loadAddress(address, LoadAddressMode::ScaleAndAddOffsetIfOffsetIsOutOfBounds); - m_assembler.swc1(src, addrTempRegister, address.offset); - m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, address.offset + 4); - } else { - /* - sll addrTemp, address.index, address.scale - addu addrTemp, addrTemp, address.base - lui immTemp, (address.offset + 0x8000) >> 16 - addu addrTemp, addrTemp, immTemp - swc1 src, (address.offset & 0xffff)(at) - swc1 src+1, (address.offset & 0xffff + 4)(at) - */ - m_assembler.sll(addrTempRegister, address.index, address.scale); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lui(immTempRegister, 
(address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister); - m_assembler.swc1(src, addrTempRegister, address.offset); - m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, address.offset + 4); - } -#else - if (!m_fixedWidth) { - loadAddress(address, LoadAddressMode::ScaleAndAddOffsetIfOffsetIsOutOfBounds); - m_assembler.sdc1(src, addrTempRegister, address.offset); - } else { - /* - sll addrTemp, address.index, address.scale - addu addrTemp, addrTemp, address.base - lui immTemp, (address.offset + 0x8000) >> 16 - addu addrTemp, addrTemp, immTemp - sdc1 src, (address.offset & 0xffff)(at) - */ - m_assembler.sll(addrTempRegister, address.index, address.scale); - m_assembler.addu(addrTempRegister, addrTempRegister, address.base); - m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16); - m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister); - m_assembler.sdc1(src, addrTempRegister, address.offset); - } -#endif - } - - void storeDouble(FPRegisterID src, TrustedImmPtr address) - { -#if WTF_MIPS_ISA(1) - move(address, addrTempRegister); - m_assembler.swc1(src, addrTempRegister, 0); - m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, 4); -#else - if (m_fixedWidth) { - /* - li addrTemp, address - sdc1 src, 0(addrTemp) - */ - move(TrustedImmPtr(address), addrTempRegister); - m_assembler.sdc1(src, addrTempRegister, 0); - } else { - uintptr_t adr = reinterpret_cast(address.m_value); - m_assembler.lui(addrTempRegister, (adr + 0x8000) >> 16); - m_assembler.sdc1(src, addrTempRegister, adr & 0xffff); - } -#endif - } - - void moveDouble(FPRegisterID src, FPRegisterID dest) - { - if (src != dest || m_fixedWidth) - m_assembler.movd(dest, src); - } - - void moveDouble(FPRegisterID src, RegisterID dest) - { - m_assembler.mfc1(dest, src); - m_assembler.mfc1(RegisterID(dest + 1), FPRegisterID(src + 1)); - } - - void moveZeroToDouble(FPRegisterID reg) - { - convertInt32ToDouble(MIPSRegisters::zero, reg); - } - - void swapDouble(FPRegisterID fr1, FPRegisterID fr2) - { - if (fr1 == fr2) - return; - moveDouble(fr1, fpTempRegister); - moveDouble(fr2, fr1); - moveDouble(fpTempRegister, fr2); - } - - void addDouble(FPRegisterID src, FPRegisterID dest) - { - m_assembler.addd(dest, dest, src); - } - - void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) - { - m_assembler.addd(dest, op1, op2); - } - - void addDouble(Address src, FPRegisterID dest) - { - loadDouble(src, fpTempRegister); - m_assembler.addd(dest, dest, fpTempRegister); - } - - void addDouble(AbsoluteAddress address, FPRegisterID dest) - { - loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister); - m_assembler.addd(dest, dest, fpTempRegister); - } - - // andDouble and orDouble are a bit convoluted to implement - // because we don't have FP instructions for those - // operations. That means we'll have to go back and forth between - // the FPU and the CPU, which accounts for most of the code here. 
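The comment above is the whole story for the two functions that follow: with no FP bitwise instructions, each 64-bit double is moved into integer registers (mfhc1/mthc1 for the high word on FP64-capable builds, paired single registers otherwise), ANDed or ORed there, and moved back. Functionally it reduces to this portable sketch (C++20 std::bit_cast; a simplification that ignores the register-pairing variants):

    #include <bit>
    #include <cstdint>

    static double andDouble(double op1, double op2)
    {
        // Bitwise AND of the raw 64-bit representations, reinterpreted back.
        uint64_t bits = std::bit_cast<uint64_t>(op1) & std::bit_cast<uint64_t>(op2);
        return std::bit_cast<double>(bits);
    }

orDouble is identical with | in place of &.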
- void andDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) - { - m_assembler.mfc1(immTempRegister, op1); - m_assembler.mfc1(dataTempRegister, op2); - m_assembler.andInsn(cmpTempRegister, immTempRegister, dataTempRegister); - m_assembler.mtc1(cmpTempRegister, dest); - -#if WTF_MIPS_ISA_REV(2) && WTF_MIPS_FP64 - m_assembler.mfhc1(immTempRegister, op1); - m_assembler.mfhc1(dataTempRegister, op2); -#else - m_assembler.mfc1(immTempRegister, FPRegisterID(op1+1)); - m_assembler.mfc1(dataTempRegister, FPRegisterID(op2+1)); -#endif - m_assembler.andInsn(cmpTempRegister, immTempRegister, dataTempRegister); -#if WTF_MIPS_ISA_REV(2) && WTF_MIPS_FP64 - m_assembler.mthc1(cmpTempRegister, dest); -#else - m_assembler.mtc1(cmpTempRegister, FPRegisterID(dest+1)); -#endif - } - - void orDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) - { - m_assembler.mfc1(immTempRegister, op1); - m_assembler.mfc1(dataTempRegister, op2); - m_assembler.orInsn(cmpTempRegister, immTempRegister, dataTempRegister); - m_assembler.mtc1(cmpTempRegister, dest); - -#if WTF_MIPS_ISA_REV(2) && WTF_MIPS_FP64 - m_assembler.mfhc1(immTempRegister, op1); - m_assembler.mfhc1(dataTempRegister, op2); -#else - m_assembler.mfc1(immTempRegister, FPRegisterID(op1+1)); - m_assembler.mfc1(dataTempRegister, FPRegisterID(op2+1)); -#endif - m_assembler.orInsn(cmpTempRegister, immTempRegister, dataTempRegister); -#if WTF_MIPS_ISA_REV(2) && WTF_MIPS_FP64 - m_assembler.mthc1(cmpTempRegister, dest); -#else - m_assembler.mtc1(cmpTempRegister, FPRegisterID(dest+1)); -#endif - } - - void subDouble(FPRegisterID src, FPRegisterID dest) - { - m_assembler.subd(dest, dest, src); - } - - void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) - { - m_assembler.subd(dest, op1, op2); - } - - void subDouble(Address src, FPRegisterID dest) - { - loadDouble(src, fpTempRegister); - m_assembler.subd(dest, dest, fpTempRegister); - } - - void mulDouble(FPRegisterID src, FPRegisterID dest) - { - m_assembler.muld(dest, dest, src); - } - - void mulDouble(Address src, FPRegisterID dest) - { - loadDouble(src, fpTempRegister); - m_assembler.muld(dest, dest, fpTempRegister); - } - - void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) - { - m_assembler.muld(dest, op1, op2); - } - - void divDouble(FPRegisterID src, FPRegisterID dest) - { - m_assembler.divd(dest, dest, src); - } - - void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) - { - m_assembler.divd(dest, op1, op2); - } - - void divDouble(Address src, FPRegisterID dest) - { - loadDouble(src, fpTempRegister); - m_assembler.divd(dest, dest, fpTempRegister); - } - - void negateDouble(FPRegisterID src, FPRegisterID dest) - { - m_assembler.negd(dest, src); - } - - void convertInt32ToDouble(RegisterID src, FPRegisterID dest) - { - m_assembler.mtc1(src, fpTempRegister); - m_assembler.cvtdw(dest, fpTempRegister); - } - - void convertInt32ToDouble(Address src, FPRegisterID dest) - { - load32(src, dataTempRegister); - m_assembler.mtc1(dataTempRegister, fpTempRegister); - m_assembler.cvtdw(dest, fpTempRegister); - } - - void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest) - { - load32(src.m_ptr, dataTempRegister); - m_assembler.mtc1(dataTempRegister, fpTempRegister); - m_assembler.cvtdw(dest, fpTempRegister); - } - - void convertFloatToDouble(FPRegisterID src, FPRegisterID dst) - { - m_assembler.cvtds(dst, src); - } - - void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst) - { - m_assembler.cvtsd(dst, src); - } - - void 
insertRelaxationWords() - { - /* We need four words for relaxation. */ - m_assembler.beq(MIPSRegisters::zero, MIPSRegisters::zero, 3); // Jump over nops; - m_assembler.nop(); - m_assembler.nop(); - m_assembler.nop(); - } - - Jump branchTrue() - { - m_assembler.appendJump(); - m_assembler.bc1t(); - m_assembler.nop(); - insertRelaxationWords(); - return Jump(m_assembler.label()); - } - - Jump branchFalse() - { - m_assembler.appendJump(); - m_assembler.bc1f(); - m_assembler.nop(); - insertRelaxationWords(); - return Jump(m_assembler.label()); - } - - Jump branchEqual(RegisterID rs, RegisterID rt) - { - m_assembler.appendJump(); - m_assembler.beq(rs, rt, 0); - m_assembler.nop(); - insertRelaxationWords(); - return Jump(m_assembler.label()); - } - - Jump branchNotEqual(RegisterID rs, RegisterID rt) - { - m_assembler.appendJump(); - m_assembler.bne(rs, rt, 0); - m_assembler.nop(); - insertRelaxationWords(); - return Jump(m_assembler.label()); - } - - Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right) - { - if (cond == DoubleEqualAndOrdered) { - m_assembler.ceqd(left, right); - return branchTrue(); - } - if (cond == DoubleNotEqualAndOrdered) { - m_assembler.cueqd(left, right); - return branchFalse(); // false - } - if (cond == DoubleGreaterThanAndOrdered) { - m_assembler.cngtd(left, right); - return branchFalse(); // false - } - if (cond == DoubleGreaterThanOrEqualAndOrdered) { - m_assembler.cnged(left, right); - return branchFalse(); // false - } - if (cond == DoubleLessThanAndOrdered) { - m_assembler.cltd(left, right); - return branchTrue(); - } - if (cond == DoubleLessThanOrEqualAndOrdered) { - m_assembler.cled(left, right); - return branchTrue(); - } - if (cond == DoubleEqualOrUnordered) { - m_assembler.cueqd(left, right); - return branchTrue(); - } - if (cond == DoubleNotEqualOrUnordered) { - m_assembler.ceqd(left, right); - return branchFalse(); // false - } - if (cond == DoubleGreaterThanOrUnordered) { - m_assembler.coled(left, right); - return branchFalse(); // false - } - if (cond == DoubleGreaterThanOrEqualOrUnordered) { - m_assembler.coltd(left, right); - return branchFalse(); // false - } - if (cond == DoubleLessThanOrUnordered) { - m_assembler.cultd(left, right); - return branchTrue(); - } - if (cond == DoubleLessThanOrEqualOrUnordered) { - m_assembler.culed(left, right); - return branchTrue(); - } - ASSERT(0); - - return Jump(); - } - - // Truncates 'src' to an integer, and places the resulting 'dest'. - // If the result is not representable as a 32 bit value, branch. - enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful }; - - Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed) - { - m_assembler.truncwd(fpTempRegister, src); - m_assembler.cfc1(dataTempRegister, MIPSRegisters::fcsr); - m_assembler.mfc1(dest, fpTempRegister); - and32(TrustedImm32(MIPSAssembler::FP_CAUSE_INVALID_OPERATION), dataTempRegister); - return branch32(branchType == BranchIfTruncateFailed ? NotEqual : Equal, dataTempRegister, MIPSRegisters::zero); - } - - // Result is undefined if the value is outside of the integer range. 
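branchTruncateDoubleToInt32 above leans on the FPU status register rather than value comparisons: trunc.w.d raises the invalid-operation cause bit in FCSR when the input is NaN or outside int32 range, so reading FCSR via cfc1 and masking FP_CAUSE_INVALID_OPERATION tells the caller whether truncation succeeded. In portable terms (a sketch of the observable behavior, not the emitted code):

    #include <cstdint>

    static bool truncateDoubleToInt32Checked(double value, int32_t& out)
    {
        // False for NaN and for magnitudes that truncate outside int32 range,
        // the same cases that set the invalid-operation cause bit on hardware.
        if (!(value > -2147483649.0 && value < 2147483648.0))
            return false;
        out = static_cast<int32_t>(value); // truncates toward zero
        return true;
    }

That range test is exactly what the unchecked truncateDoubleToInt32/truncateDoubleToUint32 helpers below skip, hence the "result is undefined" caveat above.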
- void truncateDoubleToInt32(FPRegisterID src, RegisterID dest) - { - m_assembler.truncwd(fpTempRegister, src); - m_assembler.mfc1(dest, fpTempRegister); - } - - // Result is undefined if src > 2^31 - void truncateDoubleToUint32(FPRegisterID src, RegisterID dest) - { - m_assembler.truncwd(fpTempRegister, src); - m_assembler.mfc1(dest, fpTempRegister); - } - - // Convert 'src' to an integer, and places the resulting 'dest'. - // If the result is not representable as a 32 bit value, branch. - // May also branch for some values that are representable in 32 bits - // (specifically, in this case, 0). - void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp, bool negZeroCheck = true) - { - m_assembler.cvtwd(fpTempRegister, src); - m_assembler.mfc1(dest, fpTempRegister); - - // If the result is zero, it might have been -0.0, and the double comparison won't catch this! - if (negZeroCheck) - failureCases.append(branch32(Equal, dest, MIPSRegisters::zero)); - - // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump. - convertInt32ToDouble(dest, fpTemp); - failureCases.append(branchDouble(DoubleNotEqualOrUnordered, fpTemp, src)); - } - - Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch) - { - m_assembler.vmov(scratch, MIPSRegisters::zero, MIPSRegisters::zero); - return branchDouble(DoubleNotEqualAndOrdered, reg, scratch); - } - - Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch) - { - m_assembler.vmov(scratch, MIPSRegisters::zero, MIPSRegisters::zero); - return branchDouble(DoubleEqualOrUnordered, reg, scratch); - } - - // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc. - static RelationalCondition invert(RelationalCondition cond) - { - RelationalCondition r; - if (cond == Equal) - r = NotEqual; - else if (cond == NotEqual) - r = Equal; - else if (cond == Above) - r = BelowOrEqual; - else if (cond == AboveOrEqual) - r = Below; - else if (cond == Below) - r = AboveOrEqual; - else if (cond == BelowOrEqual) - r = Above; - else if (cond == GreaterThan) - r = LessThanOrEqual; - else if (cond == GreaterThanOrEqual) - r = LessThan; - else if (cond == LessThan) - r = GreaterThanOrEqual; - else if (cond == LessThanOrEqual) - r = GreaterThan; - return r; - } - - void nop() - { - m_assembler.nop(); - } - - void memoryFence() - { - m_assembler.sync(); - } - - void abortWithReason(AbortReason reason) - { - move(TrustedImm32(reason), dataTempRegister); - breakpoint(); - } - - void storeFence() - { - m_assembler.sync(); - } - - void abortWithReason(AbortReason reason, intptr_t misc) - { - move(TrustedImm32(misc), immTempRegister); - abortWithReason(reason); - } - - template - static CodePtr readCallTarget(CodeLocationCall call) - { - return CodePtr(reinterpret_cast(MIPSAssembler::readCallTarget(call.dataLocation()))); - } - - template - static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination) - { - MIPSAssembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation()); - } - - static ptrdiff_t maxJumpReplacementSize() - { - MIPSAssembler::maxJumpReplacementSize(); - return 0; - } - - static ptrdiff_t patchableJumpSize() - { - return MIPSAssembler::patchableJumpSize(); - } - - static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; } - static bool canJumpReplacePatchableBranch32WithPatch() { return false; } - - template - static CodeLocationLabel 
startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32) - { - UNREACHABLE_FOR_PLATFORM(); - return CodeLocationLabel(); - } - - template - static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label) - { - return label.labelAtOffset(0); - } - - template - static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue) - { - MIPSAssembler::revertJumpToMove(instructionStart.dataLocation(), immTempRegister, reinterpret_cast(initialValue) & 0xffff); - } - - template - static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr) - { - UNREACHABLE_FOR_PLATFORM(); - return CodeLocationLabel(); - } - - template - static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t) - { - UNREACHABLE_FOR_PLATFORM(); - } - - template - static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*) - { - UNREACHABLE_FOR_PLATFORM(); - } - - template - static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) - { - MIPSAssembler::relinkCall(call.dataLocation(), destination.taggedPtr()); - } - - template - static void repatchCall(CodeLocationCall call, CodePtr destination) - { - MIPSAssembler::relinkCall(call.dataLocation(), destination.taggedPtr()); - } - -private: - // If m_fixedWidth is true, we will generate a fixed number of instructions. - // Otherwise, we can emit any number of instructions. - bool m_fixedWidth; - - friend class LinkBuffer; - - template - static void linkCall(void* code, Call call, CodePtr function) - { - if (call.isFlagSet(Call::Tail)) - MIPSAssembler::linkJump(code, call.m_label, function.taggedPtr()); - else - MIPSAssembler::linkCall(code, call.m_label, function.taggedPtr()); - } - -}; - -} // namespace JSC - -#endif // ENABLE(ASSEMBLER) && CPU(MIPS) diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerRISCV64.h b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerRISCV64.h index ac8cfdf8..a8c72437 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerRISCV64.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerRISCV64.h @@ -138,6 +138,7 @@ class MacroAssemblerRISCV64 : public AbstractMacroAssembler { } enum ResultCondition { + Carry, // <- not implemented Overflow, Signed, PositiveOrZero, @@ -677,6 +678,8 @@ class MacroAssemblerRISCV64 : public AbstractMacroAssembler { void lshift64(RegisterID src, TrustedImm32 imm, RegisterID dest) { + if (UNLIKELY(!imm.m_value)) + return move(src, dest); m_assembler.slliInsn(dest, src, uint32_t(imm.m_value & ((1 << 6) - 1))); } @@ -726,6 +729,8 @@ class MacroAssemblerRISCV64 : public AbstractMacroAssembler { void rshift64(RegisterID src, TrustedImm32 imm, RegisterID dest) { + if (UNLIKELY(!imm.m_value)) + return move(src, dest); m_assembler.sraiInsn(dest, src, uint32_t(imm.m_value & ((1 << 6) - 1))); } @@ -751,6 +756,13 @@ class MacroAssemblerRISCV64 : public AbstractMacroAssembler { m_assembler.maskRegister<32>(dest); } + void addUnsignedRightShift32(RegisterID src1, RegisterID src2, TrustedImm32 amount, RegisterID dest) + { + // dest = src1 + (src2 >> amount) + urshift32(src2, amount, dataTempRegister); + add32(src1, dataTempRegister, dest); + } + void urshift64(RegisterID shiftAmount, RegisterID dest) { urshift64(dest, shiftAmount, dest); @@ -768,6 +780,8 @@ class MacroAssemblerRISCV64 : public AbstractMacroAssembler { void urshift64(RegisterID src, 
TrustedImm32 imm, RegisterID dest) { + if (UNLIKELY(!imm.m_value)) + return move(src, dest); m_assembler.srliInsn(dest, src, uint32_t(imm.m_value & ((1 << 6) - 1))); } @@ -1223,6 +1237,13 @@ class MacroAssemblerRISCV64 : public AbstractMacroAssembler { m_assembler.sdInsn(temp.memory(), immRegister, Imm::S<0>()); } + void transfer32(Address src, Address dest) + { + auto temp = temps(); + load32(src, temp.data()); + store32(temp.data(), dest); + } + void transfer64(Address src, Address dest) { auto temp = temps(); @@ -1235,6 +1256,25 @@ class MacroAssemblerRISCV64 : public AbstractMacroAssembler { transfer64(src, dest); } + void transfer32(BaseIndex src, BaseIndex dest) + { + auto temp = temps(); + load32(src, temp.data()); + store32(temp.data(), dest); + } + + void transfer64(BaseIndex src, BaseIndex dest) + { + auto temp = temps(); + load64(src, temp.data()); + store64(temp.data(), dest); + } + + void transferPtr(BaseIndex src, BaseIndex dest) + { + transfer64(src, dest); + } + void storePair32(RegisterID src1, RegisterID src2, RegisterID dest) { storePair32(src1, src2, dest, TrustedImm32(0)); @@ -1916,6 +1956,12 @@ class MacroAssemblerRISCV64 : public AbstractMacroAssembler { Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation()); } + template + static void replaceWithNops(CodeLocationLabel instructionStart, size_t memoryToFillWithNopsInBytes) + { + Assembler::replaceWithNops(instructionStart.dataLocation(), memoryToFillWithNopsInBytes); + } + static ptrdiff_t maxJumpReplacementSize() { return Assembler::maxJumpReplacementSize(); @@ -2376,6 +2422,15 @@ class MacroAssemblerRISCV64 : public AbstractMacroAssembler { return branch32(cond, address, imm); } + Jump branch32WithMemory16(RelationalCondition cond, Address left, RegisterID right) + { + auto temp = temps(); + MacroAssemblerHelpers::load16OnCondition(*this, cond, left, temp.data()); + m_assembler.signExtend<32>(temp.data(), temp.data()); + m_assembler.signExtend<32>(temp.memory(), right); + return makeBranch(cond, temp.data(), temp.memory()); + } + Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest) { return branchAdd32(cond, dest, src, dest); @@ -3035,7 +3090,8 @@ class MacroAssemblerRISCV64 : public AbstractMacroAssembler { Call call(RegisterID target, RegisterID callTag) { UNUSED_PARAM(callTag); return call(target, NoPtrTag); } Call call(Address address, RegisterID callTag) { UNUSED_PARAM(callTag); return call(address, NoPtrTag); } - void callOperation(const CodePtr operation) + template + void callOperation(const CodePtr operation) { auto temp = temps(); loadImmediate(TrustedImmPtr(operation.taggedPtr()), temp.data()); @@ -4191,6 +4247,7 @@ class MacroAssemblerRISCV64 : public AbstractMacroAssembler { Jump branchTestFinalize(ResultCondition cond, RegisterID src) { switch (cond) { + case Carry: case Overflow: break; case Signed: diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h index 7ea97dca..02316047 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008-2022 Apple Inc. All rights reserved. + * Copyright (C) 2008-2023 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -79,6 +79,7 @@ class MacroAssemblerX86Common : public AbstractMacroAssembler { }; enum ResultCondition { + Carry = X86Assembler::ConditionC, Overflow = X86Assembler::ConditionO, Signed = X86Assembler::ConditionS, PositiveOrZero = X86Assembler::ConditionNS, @@ -4370,6 +4371,12 @@ class MacroAssemblerX86Common : public AbstractMacroAssembler { { X86Assembler::replaceWithJump(instructionStart.taggedPtr(), destination.taggedPtr()); } + + template + static void replaceWithNops(CodeLocationLabel instructionStart, size_t memoryToFillWithNopsInBytes) + { + X86Assembler::replaceWithNops(instructionStart.taggedPtr(), memoryToFillWithNopsInBytes); + } static ptrdiff_t maxJumpReplacementSize() { diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h index b594d9e0..fb554c12 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h @@ -250,7 +250,8 @@ class MacroAssemblerX86_64 : public MacroAssemblerX86Common { return result; } - void callOperation(const CodePtr operation) + template + void callOperation(const CodePtr operation) { move(TrustedImmPtr(operation.taggedPtr()), scratchRegister()); m_assembler.call(scratchRegister()); @@ -577,8 +578,17 @@ class MacroAssemblerX86_64 : public MacroAssemblerX86Common { m_assembler.popcntq_mr(src.offset, src.base, dst); } + void addUnsignedRightShift32(RegisterID src1, RegisterID src2, TrustedImm32 amount, RegisterID dest) + { + // dest = src1 + (src2 >> amount) + urshift32(src2, amount, scratchRegister()); + add32(src1, scratchRegister(), dest); + } + void lshift64(TrustedImm32 imm, RegisterID dest) { + if (UNLIKELY(!imm.m_value)) + return; m_assembler.shlq_i8r(imm.m_value, dest); } @@ -632,6 +642,8 @@ class MacroAssemblerX86_64 : public MacroAssemblerX86Common { void rshift64(TrustedImm32 imm, RegisterID dest) { + if (UNLIKELY(!imm.m_value)) + return; m_assembler.sarq_i8r(imm.m_value, dest); } @@ -669,6 +681,8 @@ class MacroAssemblerX86_64 : public MacroAssemblerX86Common { void urshift64(TrustedImm32 imm, RegisterID dest) { + if (UNLIKELY(!imm.m_value)) + return; m_assembler.shrq_i8r(imm.m_value, dest); } @@ -706,6 +720,8 @@ class MacroAssemblerX86_64 : public MacroAssemblerX86Common { void rotateRight64(TrustedImm32 imm, RegisterID dest) { + if (UNLIKELY(!imm.m_value)) + return; m_assembler.rorq_i8r(imm.m_value, dest); } @@ -743,6 +759,8 @@ class MacroAssemblerX86_64 : public MacroAssemblerX86Common { void rotateLeft64(TrustedImm32 imm, RegisterID dest) { + if (UNLIKELY(!imm.m_value)) + return; m_assembler.rolq_i8r(imm.m_value, dest); } @@ -1247,6 +1265,23 @@ class MacroAssemblerX86_64 : public MacroAssemblerX86Common { transfer64(src, dest); } + void transfer32(BaseIndex src, BaseIndex dest) + { + load32(src, scratchRegister()); + store32(scratchRegister(), dest); + } + + void transfer64(BaseIndex src, BaseIndex dest) + { + load64(src, scratchRegister()); + store64(scratchRegister(), dest); + } + + void transferPtr(BaseIndex src, BaseIndex dest) + { + transfer64(src, dest); + } + DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address) { padBeforePatch(); @@ -1464,6 +1499,12 @@ class MacroAssemblerX86_64 : public MacroAssemblerX86Common { return branch32(cond, scratchRegister(), right); } + Jump 
branch32WithMemory16(RelationalCondition cond, Address left, RegisterID right) + { + MacroAssemblerHelpers::load16OnCondition(*this, cond, left, scratchRegister()); + return branch32(cond, scratchRegister(), right); + } + Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right) { return branch64(cond, left, right); diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h b/vendor/webkit/Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h index 862a5d7c..a38bc8fe 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h @@ -58,10 +58,6 @@ static constexpr size_t maxFrameExtentForSlowPathCall = 0; // First four args in registers, remaining 4 args on stack. static constexpr size_t maxFrameExtentForSlowPathCall = 24; -#elif CPU(MIPS) -// Though args are in registers, there need to be space on the stack for all args. -static constexpr size_t maxFrameExtentForSlowPathCall = 40; - #else #error "Unsupported CPU: need value for maxFrameExtentForSlowPathCall" diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/PerfLog.cpp b/vendor/webkit/Source/JavaScriptCore/assembler/PerfLog.cpp index 97fd5953..3ad6347d 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/PerfLog.cpp +++ b/vendor/webkit/Source/JavaScriptCore/assembler/PerfLog.cpp @@ -41,6 +41,7 @@ #include #include #include +#include namespace JSC { @@ -72,12 +73,6 @@ static constexpr uint32_t elfMachine = 0x3E; static constexpr uint32_t elfMachine = 0xB7; #elif CPU(ARM) static constexpr uint32_t elfMachine = 0x28; -#elif CPU(MIPS) -#if CPU(LITTLE_ENDIAN) -static constexpr uint32_t elfMachine = 0x0A; -#else -static constexpr uint32_t elfMachine = 0x08; -#endif #elif CPU(RISCV64) static constexpr uint32_t elfMachine = 0xF3; #endif @@ -125,6 +120,8 @@ struct CodeLoadRecord { } // namespace JITDump +WTF_MAKE_TZONE_ALLOCATED_IMPL(PerfLog); + PerfLog& PerfLog::singleton() { static LazyNeverDestroyed logger; @@ -158,16 +155,21 @@ static inline uint32_t getCurrentThreadID() PerfLog::PerfLog() { { - std::array filename; - snprintf(filename.data(), filename.size() - 1, "jit-%d.dump", getCurrentProcessID()); - filename[filename.size() - 1] = '\0'; - m_fd = open(filename.data(), O_CREAT | O_TRUNC | O_RDWR, 0666); + StringPrintStream filename; + if (auto* optionalDirectory = Options::jitDumpDirectory()) + filename.print(optionalDirectory); + else + filename.print("/tmp"); + filename.print("/jit-", getCurrentProcessID(), ".dump"); + m_fd = open(filename.toCString().data(), O_CREAT | O_TRUNC | O_RDWR, 0666); RELEASE_ASSERT(m_fd != -1); +#if OS(LINUX) // Linux perf command records this mmap operation in perf.data as a metadata to the JIT perf annotations. // We do not use this mmap-ed memory region actually. 
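Context for the comment above and the mmap call just below: the Linux perf jitdump protocol expects the JIT to map its jit-<pid>.dump file with PROT_EXEC so that perf record logs the mapping into perf.data, letting perf inject --jit later locate the dump and synthesize symbols for the JIT-compiled code. Reduced to a standalone sketch (hypothetical helper, minimal error handling):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    // Returns the dump-file fd. The PROT_EXEC mapping is only a "marker";
    // as in PerfLog, it is intentionally kept alive and never accessed.
    int openJitDumpMarker(const char* path)
    {
        int fd = open(path, O_CREAT | O_TRUNC | O_RDWR, 0666);
        if (fd == -1)
            return -1;
        void* marker = mmap(nullptr, sysconf(_SC_PAGESIZE), PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
        if (marker == MAP_FAILED) {
            close(fd);
            return -1;
        }
        return fd;
    }

On the consumer side the usual flow is to profile with perf record -k 1 (monotonic timestamps, matching the jitdump header), then run perf inject --jit -i perf.data -o perf.jit.data; that is standard perf tooling, not part of this change.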
m_marker = mmap(nullptr, pageSize(), PROT_READ | PROT_EXEC, MAP_PRIVATE, m_fd, 0); RELEASE_ASSERT(m_marker != MAP_FAILED); +#endif m_file = fdopen(m_fd, "wb"); RELEASE_ASSERT(m_file); @@ -178,17 +180,16 @@ PerfLog::PerfLog() header.pid = getCurrentProcessID(); Locker locker { m_lock }; - write(&header, sizeof(JITDump::FileHeader)); - flush(); + write(locker, &header, sizeof(JITDump::FileHeader)); } -void PerfLog::write(const void* data, size_t size) +void PerfLog::write(const AbstractLocker&, const void* data, size_t size) { size_t result = fwrite(data, 1, size, m_file); RELEASE_ASSERT(result == size); } -void PerfLog::flush() +void PerfLog::flush(const AbstractLocker&) { fflush(m_file); } @@ -213,14 +214,20 @@ void PerfLog::log(CString&& name, const uint8_t* executableAddress, size_t size) record.codeSize = size; record.codeIndex = logger.m_codeIndex++; - logger.write(&record, sizeof(JITDump::CodeLoadRecord)); - logger.write(name.data(), name.length() + 1); - logger.write(executableAddress, size); - logger.flush(); + logger.write(locker, &record, sizeof(JITDump::CodeLoadRecord)); + logger.write(locker, name.data(), name.length() + 1); + logger.write(locker, executableAddress, size); dataLogLnIf(PerfLogInternal::verbose, name, " [", record.codeIndex, "] ", RawPointer(executableAddress), "-", RawPointer(executableAddress + size), " ", size); } +void PerfLog::flush() +{ + PerfLog& logger = singleton(); + Locker locker { logger.m_lock }; + logger.flush(locker); +} + } // namespace JSC #endif // ENABLE(ASSEMBLER) && (OS(LINUX) || OS(DARWIN)) diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/PerfLog.h b/vendor/webkit/Source/JavaScriptCore/assembler/PerfLog.h index 4e06a048..b1923c7e 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/PerfLog.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/PerfLog.h @@ -31,23 +31,25 @@ #include #include #include +#include #include namespace JSC { class PerfLog { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(PerfLog); WTF_MAKE_NONCOPYABLE(PerfLog); friend class LazyNeverDestroyed; public: static void log(CString&&, const uint8_t* executableAddress, size_t); + static void flush(); private: PerfLog(); static PerfLog& singleton(); - void write(const void*, size_t) WTF_REQUIRES_LOCK(m_lock); - void flush() WTF_REQUIRES_LOCK(m_lock); + void write(const AbstractLocker&, const void*, size_t) WTF_REQUIRES_LOCK(m_lock); + void flush(const AbstractLocker&) WTF_REQUIRES_LOCK(m_lock); FILE* m_file { nullptr }; void* m_marker { nullptr }; diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/ProbeContext.cpp b/vendor/webkit/Source/JavaScriptCore/assembler/ProbeContext.cpp index 128fe40b..0c3d3a0d 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/ProbeContext.cpp +++ b/vendor/webkit/Source/JavaScriptCore/assembler/ProbeContext.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017-2021 Apple Inc. All rights reserved. + * Copyright (C) 2017-2023 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,11 +28,15 @@ #if ENABLE(ASSEMBLER) +#include + namespace JSC { namespace Probe { static void flushDirtyStackPages(State*); +WTF_MAKE_TZONE_ALLOCATED_IMPL(Context); + void executeJSCJITProbe(State* state) { Context context(state); @@ -40,10 +44,6 @@ void executeJSCJITProbe(State* state) auto& cpu = context.cpu; void* originalLR = cpu.gpr(ARM64Registers::lr); void* originalPC = cpu.pc(); -#elif CPU(MIPS) - auto& cpu = context.cpu; - void* originalRA = cpu.gpr(MIPSRegisters::ra); - void* originalPC = cpu.pc(); #endif state->initializeStackFunction = nullptr; @@ -53,9 +53,6 @@ void executeJSCJITProbe(State* state) #if CPU(ARM64) // The ARM64 probe trampoline does not support changing both lr and pc. RELEASE_ASSERT(originalPC == cpu.pc() || originalLR == cpu.gpr(ARM64Registers::lr)); -#elif CPU(MIPS) - // The MIPS probe trampoline does not support changing both ra and pc. - RELEASE_ASSERT(originalPC == cpu.pc() || originalRA == cpu.gpr(MIPSRegisters::ra)); #endif if (context.hasWritesToFlush()) { diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/ProbeContext.h b/vendor/webkit/Source/JavaScriptCore/assembler/ProbeContext.h index 6b6d64b9..fda86ce9 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/ProbeContext.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/ProbeContext.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017-2021 Apple Inc. All rights reserved. + * Copyright (C) 2017-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -27,6 +27,7 @@ #include "MacroAssembler.h" #include "ProbeStack.h" +#include #if ENABLE(ASSEMBLER) @@ -136,8 +137,6 @@ inline void*& CPUState::pc() return *reinterpret_cast(&spr(ARM64Registers::pc)); #elif CPU(ARM_THUMB2) return *reinterpret_cast(&gpr(ARMRegisters::pc)); -#elif CPU(MIPS) - return *reinterpret_cast(&spr(MIPSRegisters::pc)); #elif CPU(RISCV64) return *reinterpret_cast(&spr(RISCV64Registers::pc)); #else @@ -153,8 +152,6 @@ inline void*& CPUState::fp() return *reinterpret_cast(&gpr(ARM64Registers::fp)); #elif CPU(ARM_THUMB2) return *reinterpret_cast(&gpr(ARMRegisters::fp)); -#elif CPU(MIPS) - return *reinterpret_cast(&gpr(MIPSRegisters::fp)); #elif CPU(RISCV64) return *reinterpret_cast(&gpr(RISCV64Registers::fp)); #else @@ -170,8 +167,6 @@ inline void*& CPUState::sp() return *reinterpret_cast(&gpr(ARM64Registers::sp)); #elif CPU(ARM_THUMB2) return *reinterpret_cast(&gpr(ARMRegisters::sp)); -#elif CPU(MIPS) - return *reinterpret_cast(&gpr(MIPSRegisters::sp)); #elif CPU(RISCV64) return *reinterpret_cast(&gpr(RISCV64Registers::sp)); #else @@ -220,7 +215,7 @@ struct State { }; class Context { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(Context); public: using RegisterID = MacroAssembler::RegisterID; using SPRegisterID = MacroAssembler::SPRegisterID; diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/ProbeStack.cpp b/vendor/webkit/Source/JavaScriptCore/assembler/ProbeStack.cpp index 5372f80a..d6f4738e 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/ProbeStack.cpp +++ b/vendor/webkit/Source/JavaScriptCore/assembler/ProbeStack.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017-2021 Apple Inc. All rights reserved. + * Copyright (C) 2017-2023 Apple Inc. All rights reserved. 
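A recurring change throughout this diff is WTF_MAKE_FAST_ALLOCATED giving way to the WTF_MAKE_TZONE_ALLOCATED(Type) / WTF_MAKE_TZONE_ALLOCATED_IMPL(Type) pair: the header macro declares class-scoped allocation functions, and the _IMPL macro, placed in exactly one .cpp file, defines them against a per-type ("typed zone") heap so objects of different types do not share backing storage. A rough sketch of what such a macro pair can desugar to; this is illustrative only (the allocator is stubbed with malloc, and WebKit's real expansion differs):

    #include <cstdlib>
    #include <new>

    // Hypothetical per-type heap; WebKit's TZone implementation is far more
    // involved (typed buckets, hardening). malloc stands in for it here.
    template<typename T>
    struct TypedZone {
        static void* allocate(size_t size) { return std::malloc(size); }
        static void deallocate(void* p) { std::free(p); }
    };

    // Header-side macro: declares class-scoped operator new/delete.
    #define DEMO_MAKE_TZONE_ALLOCATED(Type) \
    public: \
        void* operator new(size_t size); \
        void operator delete(void* p); \
    private:

    // Goes in exactly one translation unit, mirroring WTF_MAKE_TZONE_ALLOCATED_IMPL.
    #define DEMO_MAKE_TZONE_ALLOCATED_IMPL(Type) \
        void* Type::operator new(size_t size) \
        { \
            if (void* p = TypedZone<Type>::allocate(size)) \
                return p; \
            throw std::bad_alloc(); \
        } \
        void Type::operator delete(void* p) { TypedZone<Type>::deallocate(p); }

    class Widget {
        DEMO_MAKE_TZONE_ALLOCATED(Widget);
    public:
        Widget() = default;
    };

    DEMO_MAKE_TZONE_ALLOCATED_IMPL(Widget);

    // Usage: `new Widget` now routes through TypedZone<Widget>.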
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,6 +28,7 @@ #include #include +#include #if ENABLE(ASSEMBLER) @@ -54,6 +55,8 @@ static void copyStackPage(void* dst, void* src, size_t size) #define copyStackPage(dst, src, size) std::memcpy(dst, src, size) #endif +WTF_MAKE_TZONE_ALLOCATED_IMPL(Page); + Page::Page(void* baseAddress) : m_baseLogicalAddress(baseAddress) , m_physicalAddressOffset(reinterpret_cast(&m_buffer) - reinterpret_cast(baseAddress)) @@ -99,6 +102,8 @@ void* Page::lowWatermarkFromVisitingDirtyChunks() return maxLowWatermark; } +WTF_MAKE_TZONE_ALLOCATED_IMPL(Stack); + Stack::Stack(Stack&& other) : m_stackBounds(WTFMove(other.m_stackBounds)) , m_pages(WTFMove(other.m_pages)) diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/ProbeStack.h b/vendor/webkit/Source/JavaScriptCore/assembler/ProbeStack.h index e6b49f91..35f0ea8d 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/ProbeStack.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/ProbeStack.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017-2021 Apple Inc. All rights reserved. + * Copyright (C) 2017-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,6 +28,7 @@ #include "CPU.h" #include #include +#include #include #if ENABLE(ASSEMBLER) @@ -37,7 +38,7 @@ namespace JSC { namespace Probe { class Page { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(Page); public: Page(void* baseAddress); @@ -143,7 +144,7 @@ class Page { }; class Stack { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(Stack); public: Stack() : m_stackBounds(Thread::current().stack()) diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/RISCV64Assembler.h b/vendor/webkit/Source/JavaScriptCore/assembler/RISCV64Assembler.h index 98b6a0b6..73332f2d 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/RISCV64Assembler.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/RISCV64Assembler.h @@ -28,6 +28,7 @@ #if ENABLE(ASSEMBLER) && CPU(RISCV64) #include "AssemblerBuffer.h" +#include "AssemblerCommon.h" #include "RISCV64Registers.h" #include @@ -1673,6 +1674,12 @@ class RISCV64Assembler { cacheFlush(from, sizeof(uint32_t) * 2); } + static void replaceWithNops(void* from, size_t memoryToFillWithNopsInBytes) + { + fillNops(from, memoryToFillWithNopsInBytes); + cacheFlush(from, memoryToFillWithNopsInBytes); + } + static void revertJumpReplacementToPatch(void* from, void* valuePtr) { uint32_t* location = reinterpret_cast(from); @@ -1694,8 +1701,7 @@ class RISCV64Assembler { __builtin___clear_cache(reinterpret_cast(code), reinterpret_cast(end)); } - using CopyFunction = void*(&)(void*, const void*, size_t); - template + template static void fillNops(void* base, size_t size) { uint32_t* ptr = reinterpret_cast(base); @@ -1704,7 +1710,7 @@ class RISCV64Assembler { uint32_t nop = RISCV64Instructions::ADDI::construct(RISCV64Registers::zero, RISCV64Registers::zero, IImmediate::v()); for (size_t i = 0, n = size / sizeof(uint32_t); i < n; ++i) - copy(&ptr[i], &nop, sizeof(uint32_t)); + machineCodeCopy(&ptr[i], &nop, sizeof(uint32_t)); } typedef enum { diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/RegisterInfo.h b/vendor/webkit/Source/JavaScriptCore/assembler/RegisterInfo.h index 8be60b07..1bd660e0 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/RegisterInfo.h +++ 
b/vendor/webkit/Source/JavaScriptCore/assembler/RegisterInfo.h @@ -51,8 +51,6 @@ #include "X86Registers.h" #elif CPU(X86_64) #include "X86_64Registers.h" -#elif CPU(MIPS) -#include "MIPSRegisters.h" #elif CPU(ARM_THUMB2) #include "ARMv7Registers.h" #elif CPU(ARM64) diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/X86Assembler.h b/vendor/webkit/Source/JavaScriptCore/assembler/X86Assembler.h index fdb5320d..adfe1556 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/X86Assembler.h +++ b/vendor/webkit/Source/JavaScriptCore/assembler/X86Assembler.h @@ -6390,7 +6390,12 @@ class X86Assembler { WTF::unalignedStore(ptr, static_cast(OP_JMP_rel32)); WTF::unalignedStore(ptr + 1, static_cast(distance)); } - + + static void replaceWithNops(void* instructionStart, size_t memoryToFillWithNopsInBytes) + { + fillNops(instructionStart, memoryToFillWithNopsInBytes); + } + static ptrdiff_t maxJumpReplacementSize() { return 5; @@ -6502,9 +6507,7 @@ class X86Assembler { m_formatter.oneByteOp(OP_NOP); } - using CopyFunction = void*(&)(void*, const void*, size_t); - - template + template static void fillNops(void* base, size_t size) { UNUSED_PARAM(copy); diff --git a/vendor/webkit/Source/JavaScriptCore/assembler/testmasm.cpp b/vendor/webkit/Source/JavaScriptCore/assembler/testmasm.cpp index 5eb60b6c..dfe6db24 100644 --- a/vendor/webkit/Source/JavaScriptCore/assembler/testmasm.cpp +++ b/vendor/webkit/Source/JavaScriptCore/assembler/testmasm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017-2023 Apple Inc. All rights reserved. + * Copyright (C) 2017-2024 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -32,6 +32,7 @@ #include "InitializeThreading.h" #include "LinkBuffer.h" #include "ProbeContext.h" +#include "RegisterTZoneTypes.h" #include "StackAlignment.h" #include #include @@ -40,6 +41,7 @@ #include #include #include +#include #include #include #include @@ -248,9 +250,6 @@ bool isSpecialGPR(MacroAssembler::RegisterID id) #if CPU(ARM64) if (id == ARM64Registers::x18) return true; -#elif CPU(MIPS) - if (id == MIPSRegisters::zero || id == MIPSRegisters::k0 || id == MIPSRegisters::k1) - return true; #elif CPU(RISCV64) if (id == RISCV64Registers::zero || id == RISCV64Registers::ra || id == RISCV64Registers::gp || id == RISCV64Registers::tp) return true; @@ -263,7 +262,7 @@ MacroAssemblerCodeRef compile(Generator&& generate) CCallHelpers jit; generate(jit); LinkBuffer linkBuffer(jit, nullptr); - return FINALIZE_CODE(linkBuffer, JSEntryPtrTag, "testmasm compilation"); + return FINALIZE_CODE(linkBuffer, JSEntryPtrTag, nullptr, "testmasm compilation"); } template @@ -1357,7 +1356,7 @@ void testExtractUnsignedBitfield64() Vector imms = { 0, 1, 5, 7, 30, 31, 32, 42, 56, 62, 63, 64 }; for (auto lsb : imms) { for (auto width : imms) { - if (lsb >= 0 && width > 0 && lsb + width < 64) { + if (width > 0 && lsb + width < 64) { auto ubfx64 = compile([=] (CCallHelpers& jit) { emitFunctionPrologue(jit); @@ -1381,7 +1380,7 @@ void testInsertUnsignedBitfieldInZero32() Vector imms = { 0, 1, 5, 7, 30, 31, 32, 42, 56, 62, 63, 64 }; for (auto lsb : imms) { for (auto width : imms) { - if (lsb >= 0 && width > 0 && lsb + width < 32) { + if (width > 0 && lsb + width < 32) { auto ubfiz32 = compile([=] (CCallHelpers& jit) { emitFunctionPrologue(jit); @@ -1406,7 +1405,7 @@ void testInsertUnsignedBitfieldInZero64() Vector imms = { 0, 1, 5, 7, 30, 31, 32, 42, 56, 62, 63, 64 }; for (auto lsb : imms) { for 
(auto width : imms) { - if (lsb >= 0 && width > 0 && lsb + width < 64) { + if (width > 0 && lsb + width < 64) { auto ubfiz64 = compile([=] (CCallHelpers& jit) { emitFunctionPrologue(jit); @@ -1432,7 +1431,7 @@ void testInsertBitField32() Vector imms = { 0, 1, 5, 7, 30, 31, 32, 42, 56, 62, 63, 64 }; for (auto lsb : imms) { for (auto width : imms) { - if (lsb >= 0 && width > 0 && lsb + width < 32) { + if (width > 0 && lsb + width < 32) { auto bfi32 = compile([=] (CCallHelpers& jit) { emitFunctionPrologue(jit); @@ -1462,7 +1461,7 @@ void testInsertBitField64() Vector imms = { 0, 1, 5, 7, 30, 31, 32, 42, 56, 62, 63, 64 }; for (auto lsb : imms) { for (auto width : imms) { - if (lsb >= 0 && width > 0 && lsb + width < 64) { + if (width > 0 && lsb + width < 64) { auto bfi64 = compile([=] (CCallHelpers& jit) { emitFunctionPrologue(jit); @@ -1492,7 +1491,7 @@ void testExtractInsertBitfieldAtLowEnd32() Vector imms = { 0, 1, 5, 7, 30, 31, 32, 42, 56, 62, 63, 64 }; for (auto lsb : imms) { for (auto width : imms) { - if (lsb >= 0 && width > 0 && lsb + width < 32) { + if (width > 0 && lsb + width < 32) { auto bfxil32 = compile([=] (CCallHelpers& jit) { emitFunctionPrologue(jit); @@ -1522,7 +1521,7 @@ void testExtractInsertBitfieldAtLowEnd64() Vector imms = { 0, 1, 5, 7, 30, 31, 32, 42, 56, 62, 63, 64 }; for (auto lsb : imms) { for (auto width : imms) { - if (lsb >= 0 && width > 0 && lsb + width < 64) { + if (width > 0 && lsb + width < 64) { auto bfxil64 = compile([=] (CCallHelpers& jit) { emitFunctionPrologue(jit); @@ -1551,7 +1550,7 @@ void testClearBitField32() Vector imms = { 0, 1, 5, 7, 30, 31, 32, 42, 56, 62, 63, 64 }; for (auto lsb : imms) { for (auto width : imms) { - if (lsb >= 0 && width > 0 && lsb + width < 32) { + if (width > 0 && lsb + width < 32) { auto bfc32 = compile([=] (CCallHelpers& jit) { emitFunctionPrologue(jit); @@ -1576,7 +1575,7 @@ void testClearBitField64() Vector imms = { 0, 1, 5, 7, 30, 31, 32, 42, 56, 62, 63, 64 }; for (auto lsb : imms) { for (auto width : imms) { - if (lsb >= 0 && width > 0 && lsb + width < 32) { + if (width > 0 && lsb + width < 32) { auto bfc64 = compile([=] (CCallHelpers& jit) { emitFunctionPrologue(jit); @@ -1673,7 +1672,7 @@ void testInsertSignedBitfieldInZero32() Vector imms = { 0, 1, 5, 7, 30, 31, 32, 42, 56, 62, 63, 64 }; for (auto lsb : imms) { for (auto width : imms) { - if (lsb >= 0 && width > 0 && lsb + width < 32) { + if (width > 0 && lsb + width < 32) { auto insertSignedBitfieldInZero32 = compile([=] (CCallHelpers& jit) { emitFunctionPrologue(jit); @@ -1703,7 +1702,7 @@ void testInsertSignedBitfieldInZero64() Vector imms = { 0, 1, 5, 7, 30, 31, 32, 42, 56, 62, 63, 64 }; for (auto lsb : imms) { for (auto width : imms) { - if (lsb >= 0 && width > 0 && lsb + width < 64) { + if (width > 0 && lsb + width < 64) { auto insertSignedBitfieldInZero64 = compile([=] (CCallHelpers& jit) { emitFunctionPrologue(jit); @@ -1732,7 +1731,7 @@ void testExtractSignedBitfield32() Vector imms = { 0, 1, 5, 7, 30, 31, 32, 42, 56, 62, 63, 64 }; for (auto lsb : imms) { for (auto width : imms) { - if (lsb >= 0 && width > 0 && lsb + width < 32) { + if (width > 0 && lsb + width < 32) { auto extractSignedBitfield32 = compile([=] (CCallHelpers& jit) { emitFunctionPrologue(jit); @@ -1762,7 +1761,7 @@ void testExtractSignedBitfield64() Vector imms = { 0, 1, 5, 7, 30, 31, 32, 42, 56, 62, 63, 64 }; for (auto lsb : imms) { for (auto width : imms) { - if (lsb >= 0 && width > 0 && lsb + width < 64) { + if (width > 0 && lsb + width < 64) { auto extractSignedBitfield64 = compile([=] 
(CCallHelpers& jit) { emitFunctionPrologue(jit); @@ -1787,36 +1786,33 @@ void testExtractSignedBitfield64() void testExtractRegister32() { - Vector imms = { 0, 1, 5, 7, 30, 31, 32, 42, 56, 62, 63, 64 }; uint32_t datasize = CHAR_BIT * sizeof(uint32_t); for (auto n : int32Operands()) { for (auto m : int32Operands()) { - for (auto lsb : imms) { - if (0 <= lsb && lsb < datasize) { - auto extractRegister32 = compile([=] (CCallHelpers& jit) { - emitFunctionPrologue(jit); - - jit.extractRegister32(GPRInfo::argumentGPR0, - GPRInfo::argumentGPR1, - CCallHelpers::TrustedImm32(lsb), - GPRInfo::returnValueGPR); - - emitFunctionEpilogue(jit); - jit.ret(); - }); - - // ((n & mask) << highWidth) | (m >> lowWidth) - // Where: highWidth = datasize - lowWidth - // mask = (1 << lowWidth) - 1 - uint32_t highWidth = datasize - lsb; - uint32_t mask = (1U << lsb) - 1U; - uint32_t left = highWidth == datasize ? 0U : (n & mask) << highWidth; - uint32_t right = (static_cast(m) >> lsb); - uint32_t rhs = left | right; - uint32_t lhs = invoke(extractRegister32, n, m); - CHECK_EQ(lhs, rhs); - } + for (uint32_t lsb = 0; lsb < datasize; ++lsb) { + auto extractRegister32 = compile([=] (CCallHelpers& jit) { + emitFunctionPrologue(jit); + + jit.extractRegister32(GPRInfo::argumentGPR0, + GPRInfo::argumentGPR1, + CCallHelpers::TrustedImm32(lsb), + GPRInfo::returnValueGPR); + + emitFunctionEpilogue(jit); + jit.ret(); + }); + + // Test pattern: d = ((n & mask) << highWidth) | (m >>> lowWidth) + // Where: highWidth = datasize - lowWidth + // mask = (1 << lowWidth) - 1 + uint32_t highWidth = datasize - lsb; + uint32_t mask = (1U << (lsb % 32)) - 1U; + uint32_t left = (n & mask) << (highWidth % 32); + uint32_t right = (static_cast(m) >> (lsb % 32)); + uint32_t rhs = left | right; + uint32_t lhs = invoke(extractRegister32, n, m); + CHECK_EQ(lhs, rhs); } } } @@ -1824,33 +1820,33 @@ void testExtractRegister32() void testExtractRegister64() { - Vector imms = { 0, 1, 5, 7, 30, 31, 32, 42, 56, 62, 63, 64 }; uint64_t datasize = CHAR_BIT * sizeof(uint64_t); for (auto n : int64Operands()) { for (auto m : int64Operands()) { - for (auto lsb : imms) { - if (0 <= lsb && lsb < datasize) { - auto extractRegister64 = compile([=] (CCallHelpers& jit) { - emitFunctionPrologue(jit); - - jit.extractRegister64(GPRInfo::argumentGPR0, - GPRInfo::argumentGPR1, - CCallHelpers::TrustedImm32(lsb), - GPRInfo::returnValueGPR); - - emitFunctionEpilogue(jit); - jit.ret(); - }); - - uint64_t highWidth = datasize - lsb; - uint64_t mask = (1ULL << lsb) - 1ULL; - uint64_t left = highWidth == datasize ? 
0ULL : (n & mask) << highWidth; - uint64_t right = (static_cast(m) >> lsb); - uint64_t rhs = left | right; - uint64_t lhs = invoke(extractRegister64, n, m); - CHECK_EQ(lhs, rhs); - } + for (uint32_t lsb = 0; lsb < datasize; ++lsb) { + auto extractRegister64 = compile([=] (CCallHelpers& jit) { + emitFunctionPrologue(jit); + + jit.extractRegister64(GPRInfo::argumentGPR0, + GPRInfo::argumentGPR1, + CCallHelpers::TrustedImm32(lsb), + GPRInfo::returnValueGPR); + + emitFunctionEpilogue(jit); + jit.ret(); + }); + + // Test pattern: d = ((n & mask) << highWidth) | (m >>> lowWidth) + // Where: highWidth = datasize - lowWidth + // mask = (1 << lowWidth) - 1 + uint64_t highWidth = datasize - lsb; + uint64_t mask = (1ULL << (lsb % 64)) - 1ULL; + uint64_t left = (n & mask) << (highWidth % 64); + uint64_t right = (static_cast(m) >> (lsb % 64)); + uint64_t rhs = left | right; + uint64_t lhs = invoke(extractRegister64, n, m); + CHECK_EQ(lhs, rhs); } } } @@ -3010,7 +3006,7 @@ void testZeroExtend48ToWord() }); auto zeroTop16Bits = [] (int64_t value) -> int64_t { - return value & (1ull << 48) - 1; + return value & ((1ull << 48) - 1); }; for (auto a : int64Operands()) @@ -4798,9 +4794,6 @@ void testProbePreservesGPRS() CHECK_EQ(cpu.gpr(id), testWord(id)); } for (auto id = CCallHelpers::firstFPRegister(); id <= CCallHelpers::lastFPRegister(); id = nextID(id)) { -#if CPU(MIPS) - if (!(id & 1)) -#endif CHECK_EQ(cpu.fpr(id), testWord64(id)); } }); @@ -4828,9 +4821,6 @@ void testProbePreservesGPRS() CHECK_EQ(cpu.gpr(id), originalState.gpr(id)); } for (auto id = CCallHelpers::firstFPRegister(); id <= CCallHelpers::lastFPRegister(); id = nextID(id)) -#if CPU(MIPS) - if (!(id & 1)) -#endif CHECK_EQ(cpu.fpr(id), originalState.fpr(id)); }); @@ -4846,7 +4836,7 @@ void testProbeModifiesStackPointer(WTF::Function compute CPUState originalState; void* originalSP { nullptr }; void* modifiedSP { nullptr }; -#if !(CPU(MIPS) || CPU(RISCV64)) +#if !CPU(RISCV64) uintptr_t modifiedFlags { 0 }; #endif @@ -4880,7 +4870,7 @@ void testProbeModifiesStackPointer(WTF::Function compute cpu.fpr(id) = bitwise_cast(testWord64(id)); } -#if !(CPU(MIPS) || CPU(RISCV64)) +#if !(CPU(RISCV64)) originalState.spr(flagsSPR) = cpu.spr(flagsSPR); modifiedFlags = originalState.spr(flagsSPR) ^ flagsMask; cpu.spr(flagsSPR) = modifiedFlags; @@ -4905,11 +4895,8 @@ void testProbeModifiesStackPointer(WTF::Function compute CHECK_EQ(cpu.gpr(id), testWord(id)); } for (auto id = CCallHelpers::firstFPRegister(); id <= CCallHelpers::lastFPRegister(); id = nextID(id)) -#if CPU(MIPS) - if (!(id & 1)) -#endif CHECK_EQ(cpu.fpr(id), testWord64(id)); -#if !(CPU(MIPS) || CPU(RISCV64)) +#if !CPU(RISCV64) CHECK_EQ(cpu.spr(flagsSPR) & flagsMask, modifiedFlags & flagsMask); #endif CHECK_EQ(cpu.sp(), modifiedSP); @@ -4926,7 +4913,7 @@ void testProbeModifiesStackPointer(WTF::Function compute } for (auto id = CCallHelpers::firstFPRegister(); id <= CCallHelpers::lastFPRegister(); id = nextID(id)) cpu.fpr(id) = originalState.fpr(id); -#if !(CPU(MIPS) || CPU(RISCV64)) +#if !CPU(RISCV64) cpu.spr(flagsSPR) = originalState.spr(flagsSPR); #endif cpu.sp() = originalSP; @@ -4942,11 +4929,8 @@ void testProbeModifiesStackPointer(WTF::Function compute CHECK_EQ(cpu.gpr(id), originalState.gpr(id)); } for (auto id = CCallHelpers::firstFPRegister(); id <= CCallHelpers::lastFPRegister(); id = nextID(id)) -#if CPU(MIPS) - if (!(id & 1)) -#endif CHECK_EQ(cpu.fpr(id), originalState.fpr(id)); -#if !(CPU(MIPS) || CPU(RISCV64)) +#if !CPU(RISCV64) CHECK_EQ(cpu.spr(flagsSPR) & flagsMask, 
originalState.spr(flagsSPR) & flagsMask); #endif CHECK_EQ(cpu.sp(), originalSP); @@ -5027,7 +5011,7 @@ void testProbeModifiesStackValues() CPUState originalState; void* originalSP { nullptr }; void* newSP { nullptr }; -#if !(CPU(MIPS) || CPU(RISCV64)) +#if !CPU(RISCV64) uintptr_t modifiedFlags { 0 }; #endif size_t numberOfExtraEntriesToWrite { 10 }; // ARM64 requires that this be 2 word aligned. @@ -5063,7 +5047,7 @@ void testProbeModifiesStackValues() originalState.fpr(id) = cpu.fpr(id); cpu.fpr(id) = bitwise_cast(testWord64(id)); } -#if !(CPU(MIPS) || CPU(RISCV64)) +#if !CPU(RISCV64) originalState.spr(flagsSPR) = cpu.spr(flagsSPR); modifiedFlags = originalState.spr(flagsSPR) ^ flagsMask; cpu.spr(flagsSPR) = modifiedFlags; @@ -5101,11 +5085,8 @@ void testProbeModifiesStackValues() CHECK_EQ(cpu.gpr(id), testWord(id)); } for (auto id = CCallHelpers::firstFPRegister(); id <= CCallHelpers::lastFPRegister(); id = nextID(id)) -#if CPU(MIPS) - if (!(id & 1)) -#endif CHECK_EQ(cpu.fpr(id), testWord64(id)); -#if !(CPU(MIPS) || CPU(RISCV64)) +#if !CPU(RISCV64) CHECK_EQ(cpu.spr(flagsSPR) & flagsMask, modifiedFlags & flagsMask); #endif CHECK_EQ(cpu.sp(), newSP); @@ -5131,7 +5112,7 @@ void testProbeModifiesStackValues() } for (auto id = CCallHelpers::firstFPRegister(); id <= CCallHelpers::lastFPRegister(); id = nextID(id)) cpu.fpr(id) = originalState.fpr(id); -#if !(CPU(MIPS) || CPU(RISCV64)) +#if !CPU(RISCV64) cpu.spr(flagsSPR) = originalState.spr(flagsSPR); #endif cpu.sp() = originalSP; @@ -5906,57 +5887,6 @@ void testStoreImmediateBaseIndex() #endif } -static void testCagePreservesPACFailureBit() -{ -#if GIGACAGE_ENABLED - // Placate ASan builds and any environments that disables the Gigacage. - if (!Gigacage::shouldBeEnabled()) - return; - - RELEASE_ASSERT(!Gigacage::disablingPrimitiveGigacageIsForbidden()); - auto cage = compile([] (CCallHelpers& jit) { - emitFunctionPrologue(jit); - constexpr GPRReg storageGPR = GPRInfo::argumentGPR0; - constexpr GPRReg lengthGPR = GPRInfo::argumentGPR1; - constexpr GPRReg scratchGPR = GPRInfo::argumentGPR2; - jit.cageConditionallyAndUntag(Gigacage::Primitive, storageGPR, lengthGPR, scratchGPR); - jit.move(GPRInfo::argumentGPR0, GPRInfo::returnValueGPR); - emitFunctionEpilogue(jit); - jit.ret(); - }); - - void* ptr = Gigacage::tryMalloc(Gigacage::Primitive, 1); - void* taggedPtr = tagArrayPtr(ptr, 1); - RELEASE_ASSERT(hasOneBitSet(Gigacage::maxSize(Gigacage::Primitive) << 2)); - void* notCagedPtr = reinterpret_cast(reinterpret_cast(ptr) + (Gigacage::maxSize(Gigacage::Primitive) << 2)); - CHECK_NOT_EQ(Gigacage::caged(Gigacage::Primitive, notCagedPtr), notCagedPtr); - void* taggedNotCagedPtr = tagArrayPtr(notCagedPtr, 1); - - if (!isARM64E()) - CHECK_EQ(invoke(cage, taggedPtr, 2), ptr); - - CHECK_EQ(invoke(cage, taggedPtr, 1), ptr); - - auto cageWithoutAuthentication = compile([] (CCallHelpers& jit) { - emitFunctionPrologue(jit); - jit.cageWithoutUntagging(Gigacage::Primitive, GPRInfo::argumentGPR0); - jit.move(GPRInfo::argumentGPR0, GPRInfo::returnValueGPR); - emitFunctionEpilogue(jit); - jit.ret(); - }); - - CHECK_EQ(invoke(cageWithoutAuthentication, taggedPtr), taggedPtr); - if (isARM64E()) { - CHECK_NOT_EQ(invoke(cageWithoutAuthentication, taggedNotCagedPtr), taggedNotCagedPtr); - CHECK_NOT_EQ(invoke(cageWithoutAuthentication, taggedNotCagedPtr), tagArrayPtr(notCagedPtr, 1)); - CHECK_NOT_EQ(invoke(cageWithoutAuthentication, taggedNotCagedPtr), taggedPtr); - CHECK_NOT_EQ(invoke(cageWithoutAuthentication, taggedNotCagedPtr), tagArrayPtr(ptr, 1)); - } - - 
Gigacage::free(Gigacage::Primitive, ptr); -#endif -} - static void testBranchIfType() { using JSC::JSType; @@ -6323,8 +6253,6 @@ void run(const char* filter) WTF_IGNORES_THREAD_SAFETY_ANALYSIS RUN(testStoreBaseIndex()); RUN(testStoreImmediateBaseIndex()); - RUN(testCagePreservesPACFailureBit()); - RUN(testBranchIfType()); RUN(testBranchIfNotType()); #if CPU(X86_64) || CPU(ARM64) @@ -6379,8 +6307,15 @@ static void run(const char*) #endif // ENABLE(JIT) -int main(int argc, char** argv) +int main(int argc, char** argv WTF_TZONE_EXTRA_MAIN_ARGS) { +#if USE(TZONE_MALLOC) + const char* boothash = GET_TZONE_SEED_FROM_ENV(darwinEnvp); + WTF_TZONE_INIT(boothash); + JSC::registerTZoneTypes(); + WTF_TZONE_REGISTRATION_DONE(); +#endif + const char* filter = nullptr; switch (argc) { case 1: diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3BackwardsCFG.h b/vendor/webkit/Source/JavaScriptCore/b3/B3BackwardsCFG.h index 2e6a71aa..78010374 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3BackwardsCFG.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3BackwardsCFG.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 Apple Inc. All rights reserved. + * Copyright (C) 2017-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,12 +29,13 @@ #include "B3CFG.h" #include +#include namespace JSC { namespace B3 { class BackwardsCFG : public BackwardsGraph { WTF_MAKE_NONCOPYABLE(BackwardsCFG); - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(BackwardsCFG); public: BackwardsCFG(Procedure& proc) : BackwardsGraph(proc.cfg()) diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3BackwardsDominators.h b/vendor/webkit/Source/JavaScriptCore/b3/B3BackwardsDominators.h index bc1df09b..a413f811 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3BackwardsDominators.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3BackwardsDominators.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 Apple Inc. All rights reserved. + * Copyright (C) 2017-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,12 +29,13 @@ #include "B3BackwardsCFG.h" #include +#include namespace JSC { namespace B3 { class BackwardsDominators : public WTF::Dominators { WTF_MAKE_NONCOPYABLE(BackwardsDominators); - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(BackwardsDominators); public: BackwardsDominators(Procedure& proc) : WTF::Dominators(proc.backwardsCFG()) diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3BasicBlock.cpp b/vendor/webkit/Source/JavaScriptCore/b3/B3BasicBlock.cpp index c6213545..67e21cf6 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3BasicBlock.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3BasicBlock.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2020 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. 
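The new main() preamble in testmasm.cpp shows the TZone bootstrap sequence. Annotated restatement of the code from the hunk above, with step comments added; the macro names are exactly those in the diff, their expansions are WebKit-internal, and the ordering rationale is an inference, so this is not standalone-compilable outside WebKit:

    int main(int argc, char** argv WTF_TZONE_EXTRA_MAIN_ARGS)
    {
    #if USE(TZONE_MALLOC)
        // 1. Derive the per-boot seed the typed zones are keyed on.
        const char* boothash = GET_TZONE_SEED_FROM_ENV(darwinEnvp);
        // 2. Bring up the TZone allocator before any TZone-allocated object exists.
        WTF_TZONE_INIT(boothash);
        // 3. Register every type that used WTF_MAKE_TZONE_ALLOCATED
        //    (see the B3TZoneImpls.cpp file added later in this diff).
        JSC::registerTZoneTypes();
        // 4. Seal registration; presumably no further types may register after this.
        WTF_TZONE_REGISTRATION_DONE();
    #endif
        // ... the rest of main proceeds as before ...
    }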
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -33,11 +33,14 @@ #include "B3Procedure.h" #include "B3ValueInlines.h" #include +#include namespace JSC { namespace B3 { const char* const BasicBlock::dumpPrefix = "#"; +WTF_MAKE_TZONE_ALLOCATED_IMPL(BasicBlock); + BasicBlock::BasicBlock(unsigned index, double frequency) : m_index(index) , m_frequency(frequency) diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3BasicBlock.h b/vendor/webkit/Source/JavaScriptCore/b3/B3BasicBlock.h index 376138e1..17ecf0f1 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3BasicBlock.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3BasicBlock.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2017 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -32,6 +32,7 @@ #include "B3Origin.h" #include "B3SuccessorCollection.h" #include "B3Type.h" +#include #include namespace JSC { namespace B3 { @@ -44,7 +45,7 @@ template class GenericBlockInsertionSet; class BasicBlock { WTF_MAKE_NONCOPYABLE(BasicBlock); - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(BasicBlock); public: typedef Vector ValueList; typedef Vector PredecessorList; diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3CFG.h b/vendor/webkit/Source/JavaScriptCore/b3/B3CFG.h index 36606c9b..9f31c929 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3CFG.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3CFG.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2017 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,12 +31,13 @@ #include "B3Procedure.h" #include #include +#include namespace JSC { namespace B3 { class CFG { WTF_MAKE_NONCOPYABLE(CFG); - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(CFG); public: typedef BasicBlock* Node; typedef IndexSet Set; diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3CheckSpecial.cpp b/vendor/webkit/Source/JavaScriptCore/b3/B3CheckSpecial.cpp index dfb54333..cb380d68 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3CheckSpecial.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3CheckSpecial.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2017 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -33,9 +33,12 @@ #include "B3StackmapGenerationParams.h" #include "B3ValueInlines.h" #include "CCallHelpers.h" +#include namespace JSC { namespace B3 { +WTF_MAKE_TZONE_ALLOCATED_IMPL(CheckSpecial); + using Inst = Air::Inst; using Arg = Air::Arg; using GenerationContext = Air::GenerationContext; @@ -101,9 +104,9 @@ CheckSpecial::~CheckSpecial() Inst CheckSpecial::hiddenBranch(const Inst& inst) const { Inst hiddenBranch(m_checkKind, inst.origin); - hiddenBranch.args.reserveInitialCapacity(m_numCheckArgs); - for (unsigned i = 0; i < m_numCheckArgs; ++i) - hiddenBranch.args.append(inst.args[i + 1]); + hiddenBranch.args.appendUsingFunctor(m_numCheckArgs, [&](size_t i) { + return inst.args[i + 1]; + }); ASSERT(hiddenBranch.isTerminal()); return hiddenBranch; } diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3CheckSpecial.h b/vendor/webkit/Source/JavaScriptCore/b3/B3CheckSpecial.h index 13f471ec..6933d802 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3CheckSpecial.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3CheckSpecial.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2019 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,6 +30,7 @@ #include "AirKind.h" #include "B3StackmapSpecial.h" #include +#include namespace JSC { namespace B3 { @@ -49,6 +50,7 @@ struct Inst; // - CheckMul(a, b), which turns into Mul32 b, a but we pass Any for a's ValueRep. class CheckSpecial final : public StackmapSpecial { + WTF_MAKE_TZONE_ALLOCATED(CheckSpecial); public: // Support for hash consing these things. class Key { diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3Compile.cpp b/vendor/webkit/Source/JavaScriptCore/b3/B3Compile.cpp index 5d3b0c2b..cce0d3ea 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3Compile.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3Compile.cpp @@ -46,7 +46,7 @@ Compilation compile(Procedure& proc) generate(proc, jit); LinkBuffer linkBuffer(jit, nullptr); - return Compilation(FINALIZE_CODE(linkBuffer, JITCompilationPtrTag, "Compilation"), proc.releaseByproducts()); + return Compilation(FINALIZE_CODE(linkBuffer, JITCompilationPtrTag, nullptr, "Compilation"), proc.releaseByproducts()); } } } // namespace JSC::B3 diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3DataSection.cpp b/vendor/webkit/Source/JavaScriptCore/b3/B3DataSection.cpp index f4e68bca..7fe3168b 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3DataSection.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3DataSection.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2016 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. 
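The CheckSpecial::hiddenBranch change swaps the reserveInitialCapacity / append loop for appendUsingFunctor, which sizes the vector once and fills each slot from a generator lambda, eliminating both the capacity bookkeeping and the risk of a mismatched uncheckedAppend count. The same shape over std::vector, as a sketch (appendUsingFunctor here is a free function, not WTF's member):

    #include <cstddef>
    #include <vector>

    // Bulk-append `count` elements produced by functor(i), reserving once.
    // Mirrors the WTF::Vector::appendUsingFunctor pattern used in the diff.
    template<typename T, typename Functor>
    void appendUsingFunctor(std::vector<T>& vector, size_t count, Functor&& functor)
    {
        vector.reserve(vector.size() + count);
        for (size_t i = 0; i < count; ++i)
            vector.push_back(functor(i));
    }

    // Usage, echoing the hunk above (inst/numCheckArgs are hypothetical here):
    //   std::vector<Arg> branchArgs;
    //   appendUsingFunctor(branchArgs, numCheckArgs,
    //       [&](size_t i) { return inst.args[i + 1]; });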
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,8 +28,12 @@ #if ENABLE(B3_JIT) +#include + namespace JSC { namespace B3 { +WTF_MAKE_TZONE_ALLOCATED_IMPL(DataSection); + DataSection::DataSection(size_t size) : m_data(fastZeroedMalloc(size)) , m_size(size) diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3DataSection.h b/vendor/webkit/Source/JavaScriptCore/b3/B3DataSection.h index dff14250..f224ebdf 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3DataSection.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3DataSection.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,10 +28,12 @@ #if ENABLE(B3_JIT) #include "JITOpaqueByproduct.h" +#include namespace JSC { namespace B3 { class DataSection final : public OpaqueByproduct { + WTF_MAKE_TZONE_ALLOCATED(DataSection); public: DataSection(size_t size); ~DataSection() final; diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3Dominators.h b/vendor/webkit/Source/JavaScriptCore/b3/B3Dominators.h index 4a9d085f..f851a0e0 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3Dominators.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3Dominators.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,14 +30,14 @@ #include "B3CFG.h" #include "B3Procedure.h" #include -#include #include +#include namespace JSC { namespace B3 { class Dominators : public WTF::Dominators { WTF_MAKE_NONCOPYABLE(Dominators); - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(Dominators); public: Dominators(Procedure& proc) : WTF::Dominators(proc.cfg()) diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3EliminateDeadCode.cpp b/vendor/webkit/Source/JavaScriptCore/b3/B3EliminateDeadCode.cpp index 290de5f3..e269bf0a 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3EliminateDeadCode.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3EliminateDeadCode.cpp @@ -97,7 +97,7 @@ bool eliminateDeadCodeImpl(Procedure& proc) changed = true; } } - block->values().resize(targetIndex); + block->values().shrink(targetIndex); } for (Variable* variable : proc.variables()) { diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3LowerToAir.cpp b/vendor/webkit/Source/JavaScriptCore/b3/B3LowerToAir.cpp index 23282679..c8c7e480 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3LowerToAir.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3LowerToAir.cpp @@ -3350,9 +3350,18 @@ class LowerToAir { Value* left = m_value->child(0); Value* right = m_value->child(1); - // EXTR Pattern: d = ((n & mask) << highWidth) | (m >> lowWidth) - // Where: highWidth = datasize - lowWidth - // mask = (1 << lowWidth) - 1 + // Turn this: d = ((n & mask) << highWidth) | (m >>> lowWidth) + // Into this: EXTR Rd Rn Rm lsb + // + // Rn Rm + // |<---datasize--->|<---datasize--->| + // |<---datasize--->|<-lsb->| + // Rd + // + // Conditions: + // lowWidth = lsb (0 <= lsb < datasize) + // highWidth = datasize - lowWidth + // mask = (1 << lowWidth) - 1 auto tryAppendEXTR = [&] (Value* left, Value* right) -> bool { Air::Opcode opcode = 
opcodeForType(ExtractRegister32, ExtractRegister64, m_value->type()); if (!isValidForm(opcode, Arg::Tmp, Arg::Tmp, Arg::Imm, Arg::Tmp)) @@ -3379,9 +3388,12 @@ class LowerToAir { uint64_t highWidth = highWidthValue->asInt(); uint64_t lowWidth = lowWidthValue->asInt(); uint64_t datasize = opcode == ExtractRegister32 ? 32 : 64; + // Note that when `lowWidth == datasize` we cannot turn it to `MOV Rd Rn` since + // `m >>> lowWidth` means `m >>> (lowWidth % datasize)` in JavaScript. if (lowWidth + highWidth != datasize || maskBitCount != lowWidth || lowWidth == datasize) return false; + ASSERT(lowWidth < datasize); append(opcode, tmp(nValue), tmp(mValue), imm(lowWidthValue), tmp(m_value)); return true; }; diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3NaturalLoops.h b/vendor/webkit/Source/JavaScriptCore/b3/B3NaturalLoops.h index 4a1836f0..a9e6a7c3 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3NaturalLoops.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3NaturalLoops.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 Apple Inc. All rights reserved. + * Copyright (C) 2017-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,6 +29,7 @@ #include "B3Dominators.h" #include +#include namespace JSC { namespace B3 { @@ -36,7 +37,7 @@ typedef WTF::NaturalLoop NaturalLoop; class NaturalLoops : public WTF::NaturalLoops { WTF_MAKE_NONCOPYABLE(NaturalLoops); - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(NaturalLoops); public: NaturalLoops(Procedure& proc) : WTF::NaturalLoops(proc.cfg(), proc.dominators()) diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3Origin.h b/vendor/webkit/Source/JavaScriptCore/b3/B3Origin.h index 95d32111..0f1c64a8 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3Origin.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3Origin.h @@ -46,7 +46,7 @@ class Origin { const void* data() const { return m_data; } - friend bool operator==(Origin, Origin) = default; + friend bool operator==(const Origin&, const Origin&) = default; // You should avoid using this. Use OriginDump instead. void dump(PrintStream&) const; diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3PhiChildren.cpp b/vendor/webkit/Source/JavaScriptCore/b3/B3PhiChildren.cpp index 3b9b4e24..fce6f926 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3PhiChildren.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3PhiChildren.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,9 +29,12 @@ #if ENABLE(B3_JIT) #include "B3ValueInlines.h" +#include namespace JSC { namespace B3 { +WTF_MAKE_TZONE_ALLOCATED_IMPL(PhiChildren); + PhiChildren::PhiChildren(Procedure& proc) : m_upsilons(proc.values().size()) { diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3PhiChildren.h b/vendor/webkit/Source/JavaScriptCore/b3/B3PhiChildren.h index bea941b8..80550ab0 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3PhiChildren.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3PhiChildren.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. 
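The expanded comment in B3LowerToAir documents the AArch64 EXTR pattern the lowering matches: concatenate Rn:Rm as one double-width value and take datasize bits starting at bit lsb of Rm. The subtle part is the new note about lowWidth == datasize: per the comment, m >>> lowWidth in JavaScript shifts by lowWidth % datasize, so a shift of exactly datasize cannot be folded into a plain register move, hence the ASSERT(lowWidth < datasize). A small self-checking software model of the 32-bit case (plain C++, independent of the JIT):

    #include <cassert>
    #include <cstdint>

    // Model of AArch64 `EXTR Wd, Wn, Wm, #lsb` for 0 < lsb < 32: the low
    // `lsb` bits of n become the high bits, sitting above m >> lsb.
    static uint32_t extr32(uint32_t n, uint32_t m, unsigned lsb)
    {
        assert(lsb > 0 && lsb < 32); // lsb == 0 and lsb == 32 need separate handling
        uint32_t mask = (1u << lsb) - 1u;
        return ((n & mask) << (32 - lsb)) | (m >> lsb);
    }

    int main()
    {
        // 0xAB concatenated above 0x12345678, taking 32 bits starting at bit 8.
        assert(extr32(0xAB, 0x12345678, 8) == 0xAB123456);
        return 0;
    }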
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,11 +31,12 @@ #include "B3UpsilonValue.h" #include #include +#include namespace JSC { namespace B3 { class PhiChildren { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(PhiChildren); public: PhiChildren(Procedure&); ~PhiChildren(); diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3Procedure.cpp b/vendor/webkit/Source/JavaScriptCore/b3/B3Procedure.cpp index b7fc48f4..91cde37a 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3Procedure.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3Procedure.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2021 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -41,9 +41,12 @@ #include "B3ValueInlines.h" #include "B3Variable.h" #include "JITOpaqueByproducts.h" +#include namespace JSC { namespace B3 { +WTF_MAKE_TZONE_ALLOCATED_IMPL(Procedure); + Procedure::Procedure(bool usesSIMD) : m_cfg(new CFG(*this)) , m_lastPhaseName("initial") diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3Procedure.h b/vendor/webkit/Source/JavaScriptCore/b3/B3Procedure.h index 3778442d..e6c8e111 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3Procedure.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3Procedure.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2021 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -42,6 +42,7 @@ #include #include #include +#include #include #include @@ -78,7 +79,7 @@ typedef SharedTask WasmBoundsCheckGenerator; class Procedure { WTF_MAKE_NONCOPYABLE(Procedure); - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(Procedure); public: JS_EXPORT_PRIVATE Procedure(bool usesSIMD = false); diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3ReduceStrength.cpp b/vendor/webkit/Source/JavaScriptCore/b3/B3ReduceStrength.cpp index 39aff0da..93548ceb 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3ReduceStrength.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3ReduceStrength.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2022 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -1194,7 +1194,7 @@ class ReduceStrength { && m_value->child(1)->hasInt()) { uint64_t shiftAmount = m_value->child(0)->child(1)->asInt(); uint64_t maskShift = m_value->child(1)->asInt(); - uint64_t maskShiftAmount = WTF::countTrailingZeros(maskShift); + uint64_t maskShiftAmount = WTF::ctz(maskShift); uint64_t mask = maskShift >> maskShiftAmount; uint64_t width = WTF::bitCount(mask); uint64_t datasize = m_value->child(0)->child(0)->type() == Int64 ? 
64 : 32; @@ -1602,7 +1602,7 @@ class ReduceStrength { && m_value->child(1)->asInt() >= 0) { uint64_t shiftAmount = m_value->child(1)->asInt(); uint64_t maskShift = m_value->child(0)->child(1)->asInt(); - uint64_t maskShiftAmount = WTF::countTrailingZeros(maskShift); + uint64_t maskShiftAmount = WTF::ctz(maskShift); uint64_t mask = maskShift >> maskShiftAmount; uint64_t width = WTF::bitCount(mask); uint64_t datasize = m_value->child(0)->child(0)->type() == Int64 ? 64 : 32; @@ -1981,11 +1981,18 @@ class ReduceStrength { break; case Trunc: - // Turn this: Trunc(constant) - // Into this: static_cast(constant) - if (m_value->child(0)->hasInt64() || m_value->child(0)->hasDouble()) { - replaceWithNewValue( - m_proc.addIntConstant(m_value, static_cast(m_value->child(0)->asInt64()))); + // Turn this: Trunc(int64Constant) + // Into this: static_cast(int64Constant) + if (m_value->child(0)->hasInt64()) { + replaceWithNewValue(m_proc.addIntConstant(m_value, static_cast(m_value->child(0)->asInt64()))); + break; + } + + // Turn this: Trunc(doubleConstant) + // Into this: bitwise_cast(static_cast(bitwise_cast(doubleConstant))) + if (m_value->child(0)->hasDouble()) { + double value = m_value->child(0)->asDouble(); + replaceWithNewValue(m_proc.addConstant(m_value->origin(), m_value->type(), bitwise_cast(value))); break; } @@ -3101,7 +3108,7 @@ class ReduceStrength { cloneValue(m_value); // Remove the values from the predecessor. - predecessor->values().resize(startIndex); + predecessor->values().shrink(startIndex); predecessor->appendNew(m_proc, Branch, source->origin(), predicate); predecessor->setSuccessors(FrequentedBlock(cases[0]), FrequentedBlock(cases[1])); diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3StackmapSpecial.cpp b/vendor/webkit/Source/JavaScriptCore/b3/B3StackmapSpecial.cpp index 7115b584..d516141a 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3StackmapSpecial.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3StackmapSpecial.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2018 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,9 +31,12 @@ #include "AirCode.h" #include "AirGenerationContext.h" #include "B3ValueInlines.h" +#include namespace JSC { namespace B3 { +WTF_MAKE_TZONE_ALLOCATED_IMPL(StackmapSpecial); + using Arg = Air::Arg; using Inst = Air::Inst; using Tmp = Air::Tmp; diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3StackmapSpecial.h b/vendor/webkit/Source/JavaScriptCore/b3/B3StackmapSpecial.h index 2238058f..6bfcb458 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3StackmapSpecial.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3StackmapSpecial.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2017 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,6 +30,7 @@ #include "AirArg.h" #include "AirSpecial.h" #include "B3ValueRep.h" +#include namespace JSC { namespace B3 { @@ -40,6 +41,7 @@ namespace Air { class Code; } // Stackmap. 
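The Trunc folding fix a few hunks up splits the constant case in two: for an Int64 constant the result is the usual narrowing cast, but for a Double constant, Trunc means "keep the low 32 bits of the bit pattern", not a numeric conversion, so funneling both through asInt64() was only right for the integer case. A worked illustration of the difference, assuming that bit-truncation reading of Trunc(Double) from the comment in the diff:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Bit-level reinterpretation, standing in for WTF::bitwise_cast.
    template<typename To, typename From>
    static To bitCast(From from)
    {
        static_assert(sizeof(To) == sizeof(From), "sizes must match");
        To to;
        std::memcpy(&to, &from, sizeof(To));
        return to;
    }

    int main()
    {
        double d = 1.0; // bit pattern 0x3FF0000000000000
        uint64_t bits = bitCast<uint64_t>(d);

        // Trunc(Double) keeps the low 32 bits of the *bit pattern* ...
        uint32_t low32 = static_cast<uint32_t>(bits); // 0x00000000 for 1.0
        float asFloat = bitCast<float>(low32);

        // ... which is very different from numeric truncation:
        int32_t numeric = static_cast<int32_t>(d); // 1

        std::printf("bits=%llx low32=%x float=%g numeric=%d\n",
            static_cast<unsigned long long>(bits), low32,
            static_cast<double>(asFloat), numeric);
        return 0;
    }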
class StackmapSpecial : public Air::Special { + WTF_MAKE_TZONE_ALLOCATED_EXPORT(StackmapSpecial, JS_EXPORT_PRIVATE); public: StackmapSpecial(); ~StackmapSpecial() override; diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3TZoneImpls.cpp b/vendor/webkit/Source/JavaScriptCore/b3/B3TZoneImpls.cpp new file mode 100644 index 00000000..c7ee5f96 --- /dev/null +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3TZoneImpls.cpp @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2023 Apple Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" + +#if ENABLE(B3_JIT) + +#include "B3BackwardsCFG.h" +#include "B3BackwardsDominators.h" +#include "B3CFG.h" +#include "B3Dominators.h" +#include "B3NaturalLoops.h" +#include + +namespace JSC { namespace B3 { + +WTF_MAKE_TZONE_ALLOCATED_IMPL(BackwardsCFG); +WTF_MAKE_TZONE_ALLOCATED_IMPL(BackwardsDominators); +WTF_MAKE_TZONE_ALLOCATED_IMPL(CFG); +WTF_MAKE_TZONE_ALLOCATED_IMPL(Dominators); +WTF_MAKE_TZONE_ALLOCATED_IMPL(NaturalLoops); + +} } // namespace JSC::B3 + +#endif // ENABLE(B3_JIT) diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3Type.h b/vendor/webkit/Source/JavaScriptCore/b3/B3Type.h index 37d4ba95..a29af5ce 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3Type.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3Type.h @@ -25,7 +25,7 @@ #pragma once -#if ENABLE(B3_JIT) +#if ENABLE(B3_JIT) || ENABLE(WEBASSEMBLY_BBQJIT) #include "B3Common.h" #include "SIMDInfo.h" @@ -77,7 +77,7 @@ class Type { inline bool isTuple() const; inline bool isVector() const; - friend bool operator==(Type, Type) = default; + friend bool operator==(const Type&, const Type&) = default; private: TypeKind m_kind { Void }; diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3Value.cpp b/vendor/webkit/Source/JavaScriptCore/b3/B3Value.cpp index 63194e6e..ab20c640 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3Value.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3Value.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2020 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. 
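B3Origin.h and B3Type.h both change their C++20 defaulted comparison from by-value parameters to const references. Both spellings are legal for a defaulted friend operator==, but const& is the conventional form, avoids copies if the type grows, and sidesteps pitfalls some compilers diagnose around the by-value form. A minimal standalone example (OriginLike is illustrative, not the WebKit class):

    #include <cassert>

    struct OriginLike {
        const void* data { nullptr };

        // Defaulted, member-wise equality (C++20). The by-value spelling
        //     friend bool operator==(OriginLike, OriginLike) = default;
        // is also valid for small trivially copyable types; const& is the
        // conventional form and what this diff converges on.
        friend bool operator==(const OriginLike&, const OriginLike&) = default;
    };

    int main()
    {
        OriginLike a, b;
        assert(a == b);
        assert(!(a != b)); // operator!= is synthesized from operator==
        return 0;
    }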
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -45,15 +45,18 @@ #include #include #include +#include #include namespace JSC { namespace B3 { +WTF_MAKE_TZONE_ALLOCATED_IMPL(Value); + +#if ASSERT_ENABLED namespace B3ValueInternal { constexpr bool alwaysDumpConstructionSite = false; } -#if ASSERT_ENABLED String Value::generateCompilerConstructionSite() { StringPrintStream s; diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3Value.h b/vendor/webkit/Source/JavaScriptCore/b3/B3Value.h index 9fe4e5ac..65490b67 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3Value.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3Value.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2017 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -37,9 +37,9 @@ #include "B3ValueKey.h" #include "B3Width.h" #include -#include #include #include +#include #include namespace JSC { namespace B3 { @@ -52,7 +52,7 @@ class PhiChildren; class Procedure; class JS_EXPORT_PRIVATE Value { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(Value); public: static const char* const dumpPrefix; diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3ValueRep.cpp b/vendor/webkit/Source/JavaScriptCore/b3/B3ValueRep.cpp index 78f1dfae..8dc7edc3 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3ValueRep.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3ValueRep.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2018 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,9 +30,12 @@ #include "AssemblyHelpers.h" #include "JSCJSValueInlines.h" +#include namespace JSC { namespace B3 { +WTF_MAKE_TZONE_ALLOCATED_IMPL(ValueRep); + void ValueRep::addUsedRegistersTo(bool isSIMDContext, RegisterSetBuilder& set) const { switch (m_kind) { diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3ValueRep.h b/vendor/webkit/Source/JavaScriptCore/b3/B3ValueRep.h index 662a6ce3..aef3c20c 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3ValueRep.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3ValueRep.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2018 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -25,7 +25,7 @@ #pragma once -#if ENABLE(B3_JIT) +#if ENABLE(B3_JIT) || ENABLE(WEBASSEMBLY_BBQJIT) #include "FPRInfo.h" #include "GPRInfo.h" @@ -34,6 +34,7 @@ #include "RegisterSet.h" #include "ValueRecovery.h" #include +#include #if ENABLE(WEBASSEMBLY) #include "WasmValueLocation.h" #endif @@ -50,7 +51,7 @@ namespace B3 { // output. class ValueRep { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(ValueRep); public: enum Kind : uint8_t { // As an input representation, this means that B3 can pick any representation. 
As an output diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3Variable.cpp b/vendor/webkit/Source/JavaScriptCore/b3/B3Variable.cpp index 2314ee2d..966edbaa 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3Variable.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3Variable.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016 Apple Inc. All rights reserved. + * Copyright (C) 2016-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,8 +28,12 @@ #if ENABLE(B3_JIT) +#include + namespace JSC { namespace B3 { +WTF_MAKE_TZONE_ALLOCATED_IMPL(Variable); + Variable::~Variable() { } diff --git a/vendor/webkit/Source/JavaScriptCore/b3/B3Variable.h b/vendor/webkit/Source/JavaScriptCore/b3/B3Variable.h index 99c43852..52883685 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/B3Variable.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/B3Variable.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016-2017 Apple Inc. All rights reserved. + * Copyright (C) 2016-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,9 +31,9 @@ #include "B3SparseCollection.h" #include "B3Type.h" #include "B3Width.h" -#include #include #include +#include namespace JSC { namespace B3 { @@ -41,7 +41,7 @@ class Procedure; class Variable { WTF_MAKE_NONCOPYABLE(Variable); - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(Variable); public: ~Variable(); diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirAllocateRegistersAndStackAndGenerateCode.cpp b/vendor/webkit/Source/JavaScriptCore/b3/air/AirAllocateRegistersAndStackAndGenerateCode.cpp index 1effe978..96467052 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirAllocateRegistersAndStackAndGenerateCode.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirAllocateRegistersAndStackAndGenerateCode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2019-2022 Apple Inc. All rights reserved. + * Copyright (C) 2019-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -39,6 +39,7 @@ #include "DisallowMacroScratchRegisterUsage.h" #include "Reg.h" #include +#include namespace JSC { namespace B3 { namespace Air { @@ -46,6 +47,8 @@ namespace GenerateAndAllocateRegistersInternal { static bool verbose = false; } +WTF_MAKE_TZONE_ALLOCATED_IMPL(GenerateAndAllocateRegisters); + GenerateAndAllocateRegisters::GenerateAndAllocateRegisters(Code& code) : m_code(code) , m_map(code) diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirAllocateRegistersAndStackAndGenerateCode.h b/vendor/webkit/Source/JavaScriptCore/b3/air/AirAllocateRegistersAndStackAndGenerateCode.h index c5338e73..f37ef05c 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirAllocateRegistersAndStackAndGenerateCode.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirAllocateRegistersAndStackAndGenerateCode.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2019 Apple Inc. All rights reserved. + * Copyright (C) 2019-2023 Apple Inc. All rights reserved. 
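GenerateAndAllocateRegisters pairs WTF_MAKE_TZONE_ALLOCATED with WTF_MAKE_NONMOVABLE; like WTF_MAKE_NONCOPYABLE, these macros simply delete the relevant special member functions so an instance can never be silently copied or moved (plausibly because in-flight codegen structures may point into it, though that rationale is an inference). A sketch of the idiom; the DEMO_ macros approximate, but are not, WTF's definitions:

    #define DEMO_MAKE_NONCOPYABLE(Class) \
        Class(const Class&) = delete; \
        Class& operator=(const Class&) = delete

    #define DEMO_MAKE_NONMOVABLE(Class) \
        Class(Class&&) = delete; \
        Class& operator=(Class&&) = delete

    class RegisterAllocatorLike {
        DEMO_MAKE_NONCOPYABLE(RegisterAllocatorLike);
        DEMO_MAKE_NONMOVABLE(RegisterAllocatorLike);
    public:
        RegisterAllocatorLike() = default;
        // Copy/move construction and assignment are now compile errors,
        // so the object's address stays stable for its whole lifetime.
    };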
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,6 +30,7 @@ #include "AirLiveness.h" #include "AirTmpMap.h" #include +#include namespace JSC { @@ -40,7 +41,7 @@ namespace B3 { namespace Air { class Code; class GenerateAndAllocateRegisters { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(GenerateAndAllocateRegisters); WTF_MAKE_NONMOVABLE(GenerateAndAllocateRegisters); struct TmpData { diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirAllocateRegistersByGraphColoring.cpp b/vendor/webkit/Source/JavaScriptCore/b3/air/AirAllocateRegistersByGraphColoring.cpp index 91862c9f..5d3741e7 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirAllocateRegistersByGraphColoring.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirAllocateRegistersByGraphColoring.cpp @@ -75,8 +75,8 @@ class AbstractColoringAllocator { dataLogLn("]"); } - m_adjacencyList.resize(tmpArraySize); - m_moveList.resize(tmpArraySize); + m_adjacencyList.grow(tmpArraySize); + m_moveList.grow(tmpArraySize); m_isOnSelectStack.ensureSize(tmpArraySize); m_spillWorklist.ensureSize(tmpArraySize); } @@ -215,7 +215,9 @@ class AbstractColoringAllocator { const auto& adjacentsOfU = m_adjacencyList[u]; const auto& adjacentsOfV = m_adjacencyList[v]; - Vector highOrderAdjacents; + std::array highOrderAdjacents; + size_t highOrderAdjacentsSize = 0; + RELEASE_ASSERT(registerCount() <= MacroAssembler::numGPRs + MacroAssembler::numFPRs); unsigned numCandidates = adjacentsOfU.size() + adjacentsOfV.size(); if (numCandidates < registerCount()) { @@ -228,16 +230,16 @@ class AbstractColoringAllocator { ASSERT(adjacentTmpIndex != u); numCandidates--; if (!hasBeenSimplified(adjacentTmpIndex) && m_degrees[adjacentTmpIndex] >= registerCount()) { - ASSERT(std::find(highOrderAdjacents.begin(), highOrderAdjacents.end(), adjacentTmpIndex) == highOrderAdjacents.end()); - highOrderAdjacents.uncheckedAppend(adjacentTmpIndex); - if (highOrderAdjacents.size() >= registerCount()) + ASSERT(std::find(highOrderAdjacents.begin(), highOrderAdjacents.begin() + highOrderAdjacentsSize, adjacentTmpIndex) == highOrderAdjacents.begin() + highOrderAdjacentsSize); + highOrderAdjacents[highOrderAdjacentsSize++] = adjacentTmpIndex; + if (highOrderAdjacentsSize >= registerCount()) return false; - } else if (highOrderAdjacents.size() + numCandidates < registerCount()) + } else if (highOrderAdjacentsSize + numCandidates < registerCount()) return true; } ASSERT(numCandidates == adjacentsOfV.size()); - auto iteratorEndHighOrderAdjacentsOfU = highOrderAdjacents.end(); + auto iteratorEndHighOrderAdjacentsOfU = highOrderAdjacents.begin() + highOrderAdjacentsSize; for (IndexType adjacentTmpIndex : adjacentsOfV) { ASSERT(adjacentTmpIndex != u); ASSERT(adjacentTmpIndex != v); @@ -245,16 +247,16 @@ class AbstractColoringAllocator { if (!hasBeenSimplified(adjacentTmpIndex) && m_degrees[adjacentTmpIndex] >= registerCount() && std::find(highOrderAdjacents.begin(), iteratorEndHighOrderAdjacentsOfU, adjacentTmpIndex) == iteratorEndHighOrderAdjacentsOfU) { - ASSERT(std::find(iteratorEndHighOrderAdjacentsOfU, highOrderAdjacents.end(), adjacentTmpIndex) == highOrderAdjacents.end()); - highOrderAdjacents.uncheckedAppend(adjacentTmpIndex); - if (highOrderAdjacents.size() >= registerCount()) + ASSERT(std::find(iteratorEndHighOrderAdjacentsOfU, highOrderAdjacents.begin() + highOrderAdjacentsSize, adjacentTmpIndex) == highOrderAdjacents.begin() + highOrderAdjacentsSize); + 
highOrderAdjacents[highOrderAdjacentsSize++] = adjacentTmpIndex; + if (highOrderAdjacentsSize >= registerCount()) return false; - } else if (highOrderAdjacents.size() + numCandidates < registerCount()) + } else if (highOrderAdjacentsSize + numCandidates < registerCount()) return true; } ASSERT(!numCandidates); - ASSERT(highOrderAdjacents.size() < registerCount()); + ASSERT(highOrderAdjacentsSize < registerCount()); return true; } diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirBasicBlock.cpp b/vendor/webkit/Source/JavaScriptCore/b3/air/AirBasicBlock.cpp index c7f48430..3f9d18ef 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirBasicBlock.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirBasicBlock.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2020 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,11 +30,14 @@ #include "B3BasicBlockUtils.h" #include +#include namespace JSC { namespace B3 { namespace Air { const char* const BasicBlock::dumpPrefix = "#"; +WTF_MAKE_TZONE_ALLOCATED_IMPL(BasicBlock); + void BasicBlock::setSuccessors(FrequentedBlock target) { m_successors.resize(1); diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirBasicBlock.h b/vendor/webkit/Source/JavaScriptCore/b3/air/AirBasicBlock.h index 3c2d564b..2dac314b 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirBasicBlock.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirBasicBlock.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2017 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,8 +30,8 @@ #include "AirFrequentedBlock.h" #include "AirInst.h" #include "B3SuccessorCollection.h" -#include #include +#include namespace JSC { namespace B3 { @@ -46,7 +46,7 @@ class PhaseInsertionSet; class BasicBlock { WTF_MAKE_NONCOPYABLE(BasicBlock); - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(BasicBlock); public: static const char* const dumpPrefix; static constexpr unsigned uninsertedIndex = UINT_MAX; diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp b/vendor/webkit/Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp index 96e44da3..b0fea19c 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2018 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,9 +29,12 @@ #if ENABLE(B3_JIT) #include "CCallHelpers.h" +#include namespace JSC { namespace B3 { namespace Air { +WTF_MAKE_TZONE_ALLOCATED_IMPL(CCallSpecial); + CCallSpecial::CCallSpecial(bool isSIMDContext) : m_isSIMDContext(isSIMDContext) { diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirCCallSpecial.h b/vendor/webkit/Source/JavaScriptCore/b3/air/AirCCallSpecial.h index 5dabc8ca..5c89ed85 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirCCallSpecial.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirCCallSpecial.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2019 Apple Inc. All rights reserved. 
+ * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,6 +29,7 @@ #include "AirSpecial.h" #include "RegisterSet.h" +#include namespace JSC { namespace B3 { namespace Air { @@ -43,6 +44,7 @@ namespace JSC { namespace B3 { namespace Air { // the prologue, whichever happened sooner. class CCallSpecial final : public Special { + WTF_MAKE_TZONE_ALLOCATED(CCallSpecial); public: CCallSpecial(bool isSIMDContext); ~CCallSpecial() final; diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirCFG.h b/vendor/webkit/Source/JavaScriptCore/b3/air/AirCFG.h index 2e45adb0..1ff57851 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirCFG.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirCFG.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 Apple Inc. All rights reserved. + * Copyright (C) 2017-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,12 +31,13 @@ #include "AirCode.h" #include #include +#include namespace JSC { namespace B3 { namespace Air { class CFG { WTF_MAKE_NONCOPYABLE(CFG); - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(CFG); public: typedef BasicBlock* Node; typedef IndexSet Set; diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirCode.cpp b/vendor/webkit/Source/JavaScriptCore/b3/air/AirCode.cpp index b2dc1f1c..38ce270a 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirCode.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirCode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2020 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -37,11 +37,15 @@ #include "CCallHelpers.h" #include #include +#include namespace JSC { namespace B3 { namespace Air { const char* const tierName = "Air "; +WTF_MAKE_TZONE_ALLOCATED_IMPL(CFG); +WTF_MAKE_TZONE_ALLOCATED_IMPL(Code); + static void defaultPrologueGenerator(CCallHelpers& jit, Code& code) { jit.emitFunctionPrologue(); diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirCode.h b/vendor/webkit/Source/JavaScriptCore/b3/air/AirCode.h index 65584aa8..2b68bacd 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirCode.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirCode.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2021 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -41,6 +41,7 @@ #include #include #include +#include #include namespace JSC { @@ -78,7 +79,7 @@ extern const char* const tierName; class Code { WTF_MAKE_NONCOPYABLE(Code); - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(Code); public: ~Code(); diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirDisassembler.cpp b/vendor/webkit/Source/JavaScriptCore/b3/air/AirDisassembler.cpp index c4a1d9ce..9eee20d0 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirDisassembler.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirDisassembler.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017-2022 Apple Inc. All rights reserved. + * Copyright (C) 2017-2023 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -34,9 +34,12 @@ #include "CCallHelpers.h" #include "Disassembler.h" #include "LinkBuffer.h" +#include namespace JSC { namespace B3 { namespace Air { +WTF_MAKE_TZONE_ALLOCATED_IMPL(Disassembler); + void Disassembler::startEntrypoint(CCallHelpers& jit) { m_entrypointStart = jit.labelIgnoringWatchpoints(); diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirDisassembler.h b/vendor/webkit/Source/JavaScriptCore/b3/air/AirDisassembler.h index 2e1bd53e..667be346 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirDisassembler.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirDisassembler.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 Apple Inc. All rights reserved. + * Copyright (C) 2017-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,8 +28,9 @@ #if ENABLE(B3_JIT) #include "MacroAssembler.h" +#include -namespace JSC { +namespace JSC { class CCallHelpers; class LinkBuffer; @@ -41,7 +42,7 @@ class Code; struct Inst; class Disassembler { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(Disassembler); public: Disassembler() = default; diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirLiveness.h b/vendor/webkit/Source/JavaScriptCore/b3/air/AirLiveness.h index ffa6a780..7c60d2f7 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirLiveness.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirLiveness.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2017 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,12 +31,13 @@ #include "CompilerTimingScope.h" #include "SuperSampler.h" #include +#include namespace JSC { namespace B3 { namespace Air { template class Liveness : public WTF::Liveness { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(Liveness); public: Liveness(Code& code) : WTF::Liveness(code.cfg(), code) diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirLivenessAdapter.h b/vendor/webkit/Source/JavaScriptCore/b3/air/AirLivenessAdapter.h index 8e74ec86..7d04e4da 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirLivenessAdapter.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirLivenessAdapter.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2017 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -33,7 +33,9 @@ #include "AirInstInlines.h" #include "AirStackSlot.h" #include "AirTmpInlines.h" +#include #include +#include namespace JSC { namespace B3 { namespace Air { @@ -43,6 +45,8 @@ static constexpr bool verbose = false; template struct LivenessAdapter { + WTF_FORBID_HEAP_ALLOCATION; +public: typedef Air::CFG CFG; typedef Vector ActionsList; @@ -145,6 +149,8 @@ struct LivenessAdapter { template struct TmpLivenessAdapter : LivenessAdapter> { + WTF_MAKE_TZONE_ALLOCATED(TmpLivenessAdapter); +public: typedef LivenessAdapter> Base; static constexpr const char* name = "TmpLiveness"; @@ -166,6 +172,8 @@ struct TmpLivenessAdapter : LivenessAdapter { + WTF_MAKE_TZONE_ALLOCATED(UnifiedTmpLivenessAdapter); +public: typedef LivenessAdapter Base; static constexpr const char* name = "UnifiedTmpLiveness"; @@ -189,6 +197,8 @@ struct UnifiedTmpLivenessAdapter : LivenessAdapter { }; struct StackSlotLivenessAdapter : LivenessAdapter { + WTF_MAKE_TZONE_ALLOCATED(StackSlotLivenessAdapter); +public: static constexpr const char* name = "StackSlotLiveness"; typedef StackSlot* Thing; diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirOpcode.opcodes b/vendor/webkit/Source/JavaScriptCore/b3/air/AirOpcode.opcodes index a13a5c23..b4d9729d 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirOpcode.opcodes +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirOpcode.opcodes @@ -107,8 +107,6 @@ Nop -Breakpoint - Add32 U:G:32, U:G:32, ZD:G:32 Imm, Tmp, Tmp Tmp, Tmp, Tmp diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirOptimizePairedLoadStore.cpp b/vendor/webkit/Source/JavaScriptCore/b3/air/AirOptimizePairedLoadStore.cpp index e6fc4d11..8321ba03 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirOptimizePairedLoadStore.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirOptimizePairedLoadStore.cpp @@ -32,7 +32,9 @@ #include "AirArgInlines.h" #include "AirCode.h" #include "AirInst.h" +#include "AirInstInlines.h" #include "AirPhaseScope.h" +#include "CCallHelpers.h" #include namespace JSC { namespace B3 { namespace Air { diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirPrintSpecial.cpp b/vendor/webkit/Source/JavaScriptCore/b3/air/AirPrintSpecial.cpp index f4dd831d..bd520d50 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirPrintSpecial.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirPrintSpecial.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 Apple Inc. All rights reserved. + * Copyright (C) 2017-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,9 +30,12 @@ #include "CCallHelpers.h" #include "MacroAssemblerPrinter.h" +#include namespace JSC { namespace B3 { namespace Air { +WTF_MAKE_TZONE_ALLOCATED_IMPL(PrintSpecial); + PrintSpecial::PrintSpecial(Printer::PrintRecordList* list) : m_printRecordList(list) { diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirPrintSpecial.h b/vendor/webkit/Source/JavaScriptCore/b3/air/AirPrintSpecial.h index c56752a8..e2e7341c 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirPrintSpecial.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirPrintSpecial.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017-2019 Apple Inc. All rights reserved. + * Copyright (C) 2017-2023 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,6 +30,7 @@ #include "AirInst.h" #include "AirSpecial.h" #include "MacroAssemblerPrinter.h" +#include namespace JSC { @@ -92,6 +93,7 @@ struct Printer : public PrintRecord { namespace B3 { namespace Air { class PrintSpecial final : public Special { + WTF_MAKE_TZONE_ALLOCATED(PrintSpecial); public: PrintSpecial(Printer::PrintRecordList*); ~PrintSpecial() final; diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirSpecial.cpp b/vendor/webkit/Source/JavaScriptCore/b3/air/AirSpecial.cpp index e825767b..0c0ea12f 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirSpecial.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirSpecial.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2016 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,11 +30,14 @@ #include #include +#include namespace JSC { namespace B3 { namespace Air { const char* const Special::dumpPrefix = "&"; +WTF_MAKE_TZONE_ALLOCATED_IMPL(Special); + Special::Special() { } diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirSpecial.h b/vendor/webkit/Source/JavaScriptCore/b3/air/AirSpecial.h index 493a0df9..a9b7bec6 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirSpecial.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirSpecial.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2016 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -32,6 +32,7 @@ #include #include #include +#include #include namespace JSC { namespace B3 { namespace Air { @@ -41,7 +42,7 @@ struct GenerationContext; class Special { WTF_MAKE_NONCOPYABLE(Special); - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED_EXPORT(Special, JS_EXPORT_PRIVATE); public: static const char* const dumpPrefix; diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirStackSlot.cpp b/vendor/webkit/Source/JavaScriptCore/b3/air/AirStackSlot.cpp index 6eda4c20..d7ce8a9c 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirStackSlot.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirStackSlot.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2016 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,8 +28,12 @@ #if ENABLE(B3_JIT) +#include + namespace JSC { namespace B3 { namespace Air { +WTF_MAKE_TZONE_ALLOCATED_IMPL(StackSlot); + void StackSlot::setOffsetFromFP(intptr_t value) { m_offsetFromFP = value; diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirStackSlot.h b/vendor/webkit/Source/JavaScriptCore/b3/air/AirStackSlot.h index 071a5dc8..9d92ad61 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirStackSlot.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirStackSlot.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2016 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -33,6 +33,7 @@ #include #include #include +#include namespace JSC { namespace B3 { @@ -42,7 +43,7 @@ namespace Air { class StackSlot { WTF_MAKE_NONCOPYABLE(StackSlot); - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(StackSlot); public: unsigned byteSize() const { return m_byteSize; } StackSlotKind kind() const { return m_kind; } diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirTZoneImpls.cpp b/vendor/webkit/Source/JavaScriptCore/b3/air/AirTZoneImpls.cpp new file mode 100644 index 00000000..1fd350ba --- /dev/null +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirTZoneImpls.cpp @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2023 Apple Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "AirLiveness.h" +#include "AirLivenessAdapter.h" +#include + +namespace JSC { namespace B3 { namespace Air { + +WTF_MAKE_TZONE_ALLOCATED_IMPL_TEMPLATE(GPLiveness); +WTF_MAKE_TZONE_ALLOCATED_IMPL_TEMPLATE(FPLiveness); +WTF_MAKE_TZONE_ALLOCATED_IMPL_TEMPLATE(StackSlotLiveness); +WTF_MAKE_TZONE_ALLOCATED_IMPL_TEMPLATE(UnifiedTmpLiveness); + + +} } } // namespace JSC::B3::Air diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/AirTmp.h b/vendor/webkit/Source/JavaScriptCore/b3/air/AirTmp.h index ab11a18c..40d1b71a 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/AirTmp.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/AirTmp.h @@ -185,7 +185,7 @@ class Tmp { return !!*this; } - friend bool operator==(Tmp, Tmp) = default; + friend bool operator==(const Tmp&, const Tmp&) = default; void dump(PrintStream& out) const; diff --git a/vendor/webkit/Source/JavaScriptCore/b3/air/testair.cpp b/vendor/webkit/Source/JavaScriptCore/b3/air/testair.cpp index ccb349c5..6e4b6e85 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/air/testair.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/air/testair.cpp @@ -41,12 +41,14 @@ #include "Options.h" #include "ProbeContext.h" #include "PureNaN.h" +#include "RegisterTZoneTypes.h" #include #include #include #include #include #include +#include #include #include #include @@ -95,7 +97,7 @@ std::unique_ptr compile(B3::Procedure& proc) LinkBuffer linkBuffer(jit, nullptr); return makeUnique( - FINALIZE_CODE(linkBuffer, JITCompilationPtrTag, "testair compilation"), proc.releaseByproducts()); + FINALIZE_CODE(linkBuffer, JITCompilationPtrTag, nullptr, "testair compilation"), proc.releaseByproducts()); } template @@ -2776,8 +2778,15 @@ static void run(const char*) #endif // ENABLE(B3_JIT) -int main(int argc, char** argv) +int main(int argc, char** argv WTF_TZONE_EXTRA_MAIN_ARGS) { +#if USE(TZONE_MALLOC) + const char* boothash = GET_TZONE_SEED_FROM_ENV(darwinEnvp); + WTF_TZONE_INIT(boothash); + JSC::registerTZoneTypes(); + WTF_TZONE_REGISTRATION_DONE(); +#endif + const char* filter = nullptr; switch (argc) { case 1: diff --git a/vendor/webkit/Source/JavaScriptCore/b3/testb3.h b/vendor/webkit/Source/JavaScriptCore/b3/testb3.h index dd34314b..917535ac 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/testb3.h +++ b/vendor/webkit/Source/JavaScriptCore/b3/testb3.h @@ -78,6 +78,7 @@ #include #include #include +#include #include #include #include @@ -1072,6 +1073,8 @@ void testAddShl32(); void testAddShl64(); void testAddShl65(); void testReduceStrengthReassociation(bool flip); +void testReduceStrengthTruncInt64Constant(int64_t filler, int32_t value); +void testReduceStrengthTruncDoubleConstant(double filler, float value); void testLoadBaseIndexShift2(); void testLoadBaseIndexShift32(); void testOptimizeMaterialization(); diff --git a/vendor/webkit/Source/JavaScriptCore/b3/testb3_1.cpp b/vendor/webkit/Source/JavaScriptCore/b3/testb3_1.cpp index c268a626..904b2c0a 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/testb3_1.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/testb3_1.cpp @@ -26,6 +26,9 @@ #include "config.h" #include "testb3.h" +#include "RegisterTZoneTypes.h" +#include + #if ENABLE(B3_JIT) && !CPU(ARM) Lock crashLock; @@ -786,6 +789,8 @@ void run(const TestConfig* config) RUN(testPinRegisters()); RUN(testReduceStrengthReassociation(true)); RUN(testReduceStrengthReassociation(false)); + RUN_BINARY(testReduceStrengthTruncInt64Constant, int64Operands(), int32Operands()); + RUN_BINARY(testReduceStrengthTruncDoubleConstant, 
floatingPointOperands(), floatingPointOperands()); RUN(testAddShl32()); RUN(testAddShl64()); RUN(testAddShl65()); @@ -933,8 +938,15 @@ extern const JSC::JITOperationAnnotation startOfJITOperationsInTestB3 __asm("sec extern const JSC::JITOperationAnnotation endOfJITOperationsInTestB3 __asm("section$end$__DATA_CONST$__jsc_ops"); #endif -int main(int argc, char** argv) +int main(int argc, char** argv WTF_TZONE_EXTRA_MAIN_ARGS) { +#if USE(TZONE_MALLOC) + const char* boothash = GET_TZONE_SEED_FROM_ENV(darwinEnvp); + WTF_TZONE_INIT(boothash); + JSC::registerTZoneTypes(); + WTF_TZONE_REGISTRATION_DONE(); +#endif + TestConfig config; for (int i = 1; i < argc; i++) { if (!strcmp(argv[i], "-filter")) { diff --git a/vendor/webkit/Source/JavaScriptCore/b3/testb3_2.cpp b/vendor/webkit/Source/JavaScriptCore/b3/testb3_2.cpp index fb8182ae..ba1fea5b 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/testb3_2.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/testb3_2.cpp @@ -4882,14 +4882,12 @@ void testBitfieldZeroExtend64() void testExtractRegister32() { if (JSC::Options::defaultB3OptLevel() < 2) - return; - Vector lowWidths = { 0, 17, 31 }; + return; - // Test Pattern: ((n & mask1) << highWidth) | ((m & mask2) >> lowWidth) + // Test Pattern: d = ((n & mask) << highWidth) | (m >>> lowWidth) // Where: highWidth = datasize - lowWidth - // mask1 = (1 << lowWidth) - 1 - // mask2 = ~mask1 - auto test = [&] (uint32_t n, uint32_t m, uint32_t mask1, uint32_t mask2, uint32_t highWidth, uint32_t lowWidth) -> uint32_t { + // mask = (1 << lowWidth) - 1 + auto b3Test = [&] (uint32_t n, uint32_t m, uint32_t mask, uint32_t highWidth, uint32_t lowWidth, bool checkEmittedEXTR) -> uint32_t { Procedure proc; BasicBlock* root = proc.addBlock(); @@ -4899,100 +4897,96 @@ void testExtractRegister32() Value* mValue = root->appendNew( proc, Trunc, Origin(), root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)); - Value* mask1Value = root->appendNew(proc, Origin(), mask1); - Value* mask2Value = root->appendNew(proc, Origin(), mask2); + Value* maskValue = root->appendNew(proc, Origin(), mask); Value* highWidthValue = root->appendNew(proc, Origin(), highWidth); Value* lowWidthValue = root->appendNew(proc, Origin(), lowWidth); - Value* leftAndValue = root->appendNew(proc, BitAnd, Origin(), nValue, mask1Value); + Value* leftAndValue = root->appendNew(proc, BitAnd, Origin(), nValue, maskValue); Value* left = root->appendNew(proc, Shl, Origin(), leftAndValue, highWidthValue); - - Value* rightAndValue = root->appendNew(proc, BitAnd, Origin(), mValue, mask2Value); - Value* right = root->appendNew(proc, ZShr, Origin(), rightAndValue, lowWidthValue); + Value* right = root->appendNew(proc, ZShr, Origin(), mValue, lowWidthValue); root->appendNewControlValue( proc, Return, Origin(), root->appendNew(proc, BitOr, Origin(), left, right)); auto code = compileProc(proc); - if (isARM64() && lowWidth > 0) + if (checkEmittedEXTR && isARM64() && 0 < lowWidth && lowWidth < 32) checkUsesInstruction(*code, "extr"); return invoke(*code, n, m); }; uint32_t datasize = CHAR_BIT * sizeof(uint32_t); + auto test = [&](uint32_t n, uint32_t m, uint32_t lowWidth, bool checkEmittedEXTR = false) { + uint32_t highWidth = datasize - lowWidth; + uint32_t mask = (1U << (lowWidth % datasize)) - 1U; + uint32_t left = (n & mask) << (highWidth % datasize); + uint32_t right = (m >> (lowWidth % datasize)); + uint32_t rhs = left | right; + uint32_t lhs = b3Test(n, m, mask, highWidth, lowWidth, checkEmittedEXTR); + CHECK(lhs == rhs); + }; + for (auto nOperand : 
int32Operands()) { for (auto mOperand : int32Operands()) { - for (auto lowWidth : lowWidths) { - uint32_t n = nOperand.value; - uint32_t m = mOperand.value; - uint32_t highWidth = datasize - lowWidth; - uint32_t mask1 = (1U << lowWidth) - 1U; - uint32_t mask2 = ~mask1; - uint32_t left = highWidth == datasize ? 0U : ((n & mask1) << highWidth); - uint32_t right = ((m & mask2) >> lowWidth); - uint32_t rhs = left | right; - uint32_t lhs = test(n, m, mask1, mask2, highWidth, lowWidth); - CHECK(lhs == rhs); - } + for (uint32_t lowWidth = 0; lowWidth <= datasize; ++lowWidth) + test(nOperand.value, mOperand.value, lowWidth); } } + + test(100, 200, 27, true); } void testExtractRegister64() { if (JSC::Options::defaultB3OptLevel() < 2) - return; - Vector lowWidths = { 0, 34, 63 }; + return; - // Test Pattern: ((n & mask1) << highWidth) | ((m & mask2) >> lowWidth) + // Test Pattern: d = ((n & mask) << highWidth) | (m >>> lowWidth) // Where: highWidth = datasize - lowWidth - // mask1 = (1 << lowWidth) - 1 - // mask2 = ~mask1 - auto test = [&] (uint64_t n, uint64_t m, uint64_t mask1, uint64_t mask2, uint64_t highWidth, uint64_t lowWidth) -> uint64_t { + // mask = (1 << lowWidth) - 1 + auto b3Test = [&] (uint64_t n, uint64_t m, uint64_t mask, uint64_t highWidth, uint64_t lowWidth, bool checkEmittedEXTR) -> uint64_t { Procedure proc; BasicBlock* root = proc.addBlock(); Value* nValue = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0); Value* mValue = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1); - Value* mask1Value = root->appendNew(proc, Origin(), mask1); - Value* mask2Value = root->appendNew(proc, Origin(), mask2); + Value* maskValue = root->appendNew(proc, Origin(), mask); Value* highWidthValue = root->appendNew(proc, Origin(), highWidth); Value* lowWidthValue = root->appendNew(proc, Origin(), lowWidth); - Value* leftAndValue = root->appendNew(proc, BitAnd, Origin(), nValue, mask1Value); + Value* leftAndValue = root->appendNew(proc, BitAnd, Origin(), nValue, maskValue); Value* left = root->appendNew(proc, Shl, Origin(), leftAndValue, highWidthValue); - - Value* rightAndValue = root->appendNew(proc, BitAnd, Origin(), mValue, mask2Value); - Value* right = root->appendNew(proc, ZShr, Origin(), rightAndValue, lowWidthValue); + Value* right = root->appendNew(proc, ZShr, Origin(), mValue, lowWidthValue); root->appendNewControlValue( proc, Return, Origin(), root->appendNew(proc, BitOr, Origin(), left, right)); auto code = compileProc(proc); - if (isARM64() && lowWidth > 0) + if (checkEmittedEXTR && isARM64() && 0 < lowWidth && lowWidth < 64) checkUsesInstruction(*code, "extr"); return invoke(*code, n, m); }; uint64_t datasize = CHAR_BIT * sizeof(uint64_t); + auto test = [&](uint64_t n, uint64_t m, uint32_t lowWidth, bool checkEmittedEXTR = false) { + uint64_t highWidth = datasize - lowWidth; + uint64_t mask = (1ULL << (lowWidth % datasize)) - 1ULL; + uint64_t left = (n & mask) << (highWidth % datasize); + uint64_t right = (m >> (lowWidth % datasize)); + uint64_t rhs = left | right; + uint64_t lhs = b3Test(n, m, mask, highWidth, lowWidth, checkEmittedEXTR); + CHECK(lhs == rhs); + }; + for (auto nOperand : int64Operands()) { for (auto mOperand : int64Operands()) { - for (auto lowWidth : lowWidths) { - uint64_t n = nOperand.value; - uint64_t m = mOperand.value; - uint64_t highWidth = datasize - lowWidth; - uint64_t mask1 = (1ULL << lowWidth) - 1ULL; - uint64_t mask2 = ~mask1; - uint64_t left = highWidth == datasize ? 
0ULL : ((n & mask1) << highWidth); - uint64_t right = ((m & mask2) >> lowWidth); - uint64_t rhs = left | right; - uint64_t lhs = test(n, m, mask1, mask2, highWidth, lowWidth); - CHECK(lhs == rhs); - } + for (uint32_t lowWidth = 0; lowWidth <= datasize; ++lowWidth) + test(nOperand.value, mOperand.value, lowWidth); } } + + test(100, 200, 37, true); } void testAddWithLeftShift32() diff --git a/vendor/webkit/Source/JavaScriptCore/b3/testb3_6.cpp b/vendor/webkit/Source/JavaScriptCore/b3/testb3_6.cpp index f2bb5cf9..d4ca8e1f 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/testb3_6.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/testb3_6.cpp @@ -1944,7 +1944,7 @@ void testEntrySwitchSimple() CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.code().entrypointLabel(1)); CodeLocationLabel labelThree = linkBuffer.locationOf(proc.code().entrypointLabel(2)); - MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, JITCompilationPtrTag, "testb3 compilation"); + MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, JITCompilationPtrTag, nullptr, "testb3 compilation"); CHECK(invoke(labelOne, 1, 2) == 3); CHECK(invoke(labelTwo, 1, 2) == -1); @@ -1977,7 +1977,7 @@ void testEntrySwitchNoEntrySwitch() CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.code().entrypointLabel(1)); CodeLocationLabel labelThree = linkBuffer.locationOf(proc.code().entrypointLabel(2)); - MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, JITCompilationPtrTag, "testb3 compilation"); + MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, JITCompilationPtrTag, nullptr, "testb3 compilation"); CHECK_EQ(invoke(labelOne, 1, 2), 3); CHECK_EQ(invoke(labelTwo, 1, 2), 3); @@ -2064,7 +2064,7 @@ void testEntrySwitchWithCommonPaths() CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.code().entrypointLabel(1)); CodeLocationLabel labelThree = linkBuffer.locationOf(proc.code().entrypointLabel(2)); - MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, JITCompilationPtrTag, "testb3 compilation"); + MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, JITCompilationPtrTag, nullptr, "testb3 compilation"); CHECK_EQ(invoke(labelOne, 1, 2, 10), 3); CHECK_EQ(invoke(labelTwo, 1, 2, 10), -1); @@ -2181,7 +2181,7 @@ void testEntrySwitchWithCommonPathsAndNonTrivialEntrypoint() CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.code().entrypointLabel(1)); CodeLocationLabel labelThree = linkBuffer.locationOf(proc.code().entrypointLabel(2)); - MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, JITCompilationPtrTag, "testb3 compilation"); + MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, JITCompilationPtrTag, nullptr, "testb3 compilation"); CHECK_EQ(invoke(labelOne, 1, 2, 10, false), 3); CHECK_EQ(invoke(labelTwo, 1, 2, 10, false), -1); @@ -2258,7 +2258,7 @@ void testEntrySwitchLoop() CodeLocationLabel labelOne = linkBuffer.locationOf(proc.code().entrypointLabel(0)); CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.code().entrypointLabel(1)); - MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, JITCompilationPtrTag, "testb3 compilation"); + MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, JITCompilationPtrTag, nullptr, "testb3 compilation"); CHECK(invoke(labelOne, 0) == 1); CHECK(invoke(labelOne, 42) == 43); diff --git a/vendor/webkit/Source/JavaScriptCore/b3/testb3_7.cpp b/vendor/webkit/Source/JavaScriptCore/b3/testb3_7.cpp index b5a79bf4..92c5e0ed 100644 --- a/vendor/webkit/Source/JavaScriptCore/b3/testb3_7.cpp +++ b/vendor/webkit/Source/JavaScriptCore/b3/testb3_7.cpp 
@@ -380,6 +380,43 @@ void testReduceStrengthReassociation(bool flip) (root->last()->child(0)->child(0)->child(0) == arg2 && root->last()->child(0)->child(0)->child(1) == arg1)); } +template +void testReduceStrengthTruncConstant(Type64 filler, Type32 value) +{ + Procedure proc; + BasicBlock* root = proc.addBlock(); + + int64_t bits = bitwise_cast(filler); + int32_t loBits = bitwise_cast(value); + bits = ((bits >> 32) << 32) | loBits; + Type64 testValue = bitwise_cast(bits); + + Value* b2 = root->appendNew(proc, Origin(), testValue); + Value* b3 = root->appendNew(proc, JSC::B3::Trunc, Origin(), b2); + root->appendNew(proc, Return, Origin(), b3); + + proc.resetReachability(); + + reduceStrength(proc); + + CHECK_EQ(root->last()->opcode(), Return); + if constexpr (std::is_same_v) { + CHECK_EQ(root->last()->child(0)->opcode(), ConstFloat); + CHECK(bitwise_cast(root->last()->child(0)->asFloat()) == bitwise_cast(value)); + } else + CHECK(root->last()->child(0)->isInt32(value)); +} + +void testReduceStrengthTruncInt64Constant(int64_t filler, int32_t value) +{ + testReduceStrengthTruncConstant(filler, value); +} + +void testReduceStrengthTruncDoubleConstant(double filler, float value) +{ + testReduceStrengthTruncConstant(filler, value); +} + void testLoadBaseIndexShift2() { Procedure proc; diff --git a/vendor/webkit/Source/JavaScriptCore/builtins/ArrayConstructor.js b/vendor/webkit/Source/JavaScriptCore/builtins/ArrayConstructor.js index 44d391e7..2a3cd717 100644 --- a/vendor/webkit/Source/JavaScriptCore/builtins/ArrayConstructor.js +++ b/vendor/webkit/Source/JavaScriptCore/builtins/ArrayConstructor.js @@ -52,6 +52,12 @@ function from(items /*, mapFn, thisArg */) var arrayLike = @toObject(items, "Array.from requires an array-like object - not null or undefined"); + if (!mapFn) { + var fastResult = @arrayFromFast(this, arrayLike); + if (fastResult) + return fastResult; + } + var iteratorMethod = items.@@iterator; if (!@isUndefinedOrNull(iteratorMethod)) { if (!@isCallable(iteratorMethod)) diff --git a/vendor/webkit/Source/JavaScriptCore/builtins/ArrayPrototype.js b/vendor/webkit/Source/JavaScriptCore/builtins/ArrayPrototype.js index 162fcc16..cf4165bb 100644 --- a/vendor/webkit/Source/JavaScriptCore/builtins/ArrayPrototype.js +++ b/vendor/webkit/Source/JavaScriptCore/builtins/ArrayPrototype.js @@ -130,6 +130,7 @@ function forEach(callback /*, thisArg */) } } +@alwaysInline function filter(callback /*, thisArg */) { "use strict"; diff --git a/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinExecutableCreator.cpp b/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinExecutableCreator.cpp index d85ae472..9cd99550 100644 --- a/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinExecutableCreator.cpp +++ b/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinExecutableCreator.cpp @@ -30,9 +30,9 @@ namespace JSC { -UnlinkedFunctionExecutable* createBuiltinExecutable(VM& vm, const SourceCode& source, const Identifier& ident, ImplementationVisibility implementationVisibility, ConstructorKind kind, ConstructAbility ability) +UnlinkedFunctionExecutable* createBuiltinExecutable(VM& vm, const SourceCode& source, const Identifier& ident, ImplementationVisibility implementationVisibility, ConstructorKind kind, ConstructAbility ability, InlineAttribute inlineAttribute) { - return BuiltinExecutables::createExecutable(vm, source, ident, implementationVisibility, kind, ability, NeedsClassFieldInitializer::No); + return BuiltinExecutables::createExecutable(vm, source, ident, implementationVisibility, kind, ability, 
inlineAttribute, NeedsClassFieldInitializer::No); } } // namespace JSC diff --git a/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinExecutableCreator.h b/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinExecutableCreator.h index d562f79b..c0e06a43 100644 --- a/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinExecutableCreator.h +++ b/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinExecutableCreator.h @@ -28,11 +28,12 @@ #include "ConstructAbility.h" #include "ConstructorKind.h" #include "ImplementationVisibility.h" +#include "InlineAttribute.h" #include "ParserModes.h" #include "SourceCode.h" namespace JSC { -JS_EXPORT_PRIVATE UnlinkedFunctionExecutable* createBuiltinExecutable(VM&, const SourceCode&, const Identifier&, ImplementationVisibility, ConstructorKind, ConstructAbility); +JS_EXPORT_PRIVATE UnlinkedFunctionExecutable* createBuiltinExecutable(VM&, const SourceCode&, const Identifier&, ImplementationVisibility, ConstructorKind, ConstructAbility, InlineAttribute); } // namespace JSC diff --git a/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinExecutables.cpp b/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinExecutables.cpp index d2f85a70..acfaeba4 100644 --- a/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinExecutables.cpp +++ b/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinExecutables.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2014-2019 Apple Inc. All rights reserved. + * Copyright (C) 2014-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,9 +31,12 @@ #include "JSCJSValueInlines.h" #include "Parser.h" #include +#include namespace JSC { +WTF_MAKE_TZONE_ALLOCATED_IMPL(BuiltinExecutables); + BuiltinExecutables::BuiltinExecutables(VM& vm) : m_vm(vm) , m_combinedSourceProvider(StringSourceProvider::create(StringImpl::createWithoutCopying(s_JSCCombinedCode, s_JSCCombinedCodeLength), { }, String(), SourceTaintedOrigin::Untainted)) @@ -67,18 +70,18 @@ UnlinkedFunctionExecutable* BuiltinExecutables::createDefaultConstructor(Constru break; case ConstructorKind::Base: case ConstructorKind::Extends: - return createExecutable(m_vm, defaultConstructorSourceCode(constructorKind), name, ImplementationVisibility::Public, constructorKind, ConstructAbility::CanConstruct, needsClassFieldInitializer, privateBrandRequirement); + return createExecutable(m_vm, defaultConstructorSourceCode(constructorKind), name, ImplementationVisibility::Public, constructorKind, ConstructAbility::CanConstruct, InlineAttribute::Always, needsClassFieldInitializer, privateBrandRequirement); } ASSERT_NOT_REACHED(); return nullptr; } -UnlinkedFunctionExecutable* BuiltinExecutables::createBuiltinExecutable(const SourceCode& code, const Identifier& name, ImplementationVisibility implementationVisibility, ConstructorKind constructorKind, ConstructAbility constructAbility) +UnlinkedFunctionExecutable* BuiltinExecutables::createBuiltinExecutable(const SourceCode& code, const Identifier& name, ImplementationVisibility implementationVisibility, ConstructorKind constructorKind, ConstructAbility constructAbility, InlineAttribute inlineAttribute) { - return createExecutable(m_vm, code, name, implementationVisibility, constructorKind, constructAbility, NeedsClassFieldInitializer::No); + return createExecutable(m_vm, code, name, implementationVisibility, constructorKind, constructAbility, inlineAttribute, NeedsClassFieldInitializer::No); } -UnlinkedFunctionExecutable* 
BuiltinExecutables::createExecutable(VM& vm, const SourceCode& source, const Identifier& name, ImplementationVisibility implementationVisibility, ConstructorKind constructorKind, ConstructAbility constructAbility, NeedsClassFieldInitializer needsClassFieldInitializer, PrivateBrandRequirement privateBrandRequirement) +UnlinkedFunctionExecutable* BuiltinExecutables::createExecutable(VM& vm, const SourceCode& source, const Identifier& name, ImplementationVisibility implementationVisibility, ConstructorKind constructorKind, ConstructAbility constructAbility, InlineAttribute inlineAttribute, NeedsClassFieldInitializer needsClassFieldInitializer, PrivateBrandRequirement privateBrandRequirement) { // FIXME: Can we just make MetaData computation be constexpr and have the compiler do this for us? // https://bugs.webkit.org/show_bug.cgi?id=193272 @@ -200,7 +203,7 @@ UnlinkedFunctionExecutable* BuiltinExecutables::createExecutable(VM& vm, const S JSTokenLocation end; end.line = 1; end.lineStartOffset = source.startOffset(); - end.startOffset = source.startOffset() + strlen("(") + asyncOffset; + end.startOffset = source.startOffset() + strlen("("); end.endOffset = std::numeric_limits::max(); FunctionMetadataNode metadata( @@ -218,7 +221,7 @@ UnlinkedFunctionExecutable* BuiltinExecutables::createExecutable(VM& vm, const S JSParserBuiltinMode builtinMode = isBuiltinDefaultClassConstructor ? JSParserBuiltinMode::NotBuiltin : JSParserBuiltinMode::Builtin; std::unique_ptr program = parse( vm, source, Identifier(), implementationVisibility, builtinMode, - JSParserStrictMode::NotStrict, JSParserScriptMode::Classic, SourceParseMode::ProgramMode, SuperBinding::NotNeeded, error, + JSParserStrictMode::NotStrict, JSParserScriptMode::Classic, SourceParseMode::ProgramMode, FunctionMode::None, SuperBinding::NotNeeded, error, &positionBeforeLastNewlineFromParser, constructorKind); if (program) { @@ -258,7 +261,7 @@ UnlinkedFunctionExecutable* BuiltinExecutables::createExecutable(VM& vm, const S } } - UnlinkedFunctionExecutable* functionExecutable = UnlinkedFunctionExecutable::create(vm, source, &metadata, kind, constructAbility, JSParserScriptMode::Classic, nullptr, std::nullopt, DerivedContextType::None, needsClassFieldInitializer, privateBrandRequirement, isBuiltinDefaultClassConstructor); + UnlinkedFunctionExecutable* functionExecutable = UnlinkedFunctionExecutable::create(vm, source, &metadata, kind, constructAbility, inlineAttribute, JSParserScriptMode::Classic, nullptr, std::nullopt, std::nullopt, DerivedContextType::None, needsClassFieldInitializer, privateBrandRequirement, isBuiltinDefaultClassConstructor); return functionExecutable; } @@ -283,7 +286,7 @@ UnlinkedFunctionExecutable* BuiltinExecutables::name##Executable() \ Identifier executableName = m_vm.propertyNames->builtinNames().functionName##PublicName();\ if (overrideName)\ executableName = Identifier::fromString(m_vm, overrideName);\ - m_unlinkedExecutables[index] = createBuiltinExecutable(name##Source(), executableName, s_##name##ImplementationVisibility, s_##name##ConstructorKind, s_##name##ConstructAbility);\ + m_unlinkedExecutables[index] = createBuiltinExecutable(name##Source(), executableName, s_##name##ImplementationVisibility, s_##name##ConstructorKind, s_##name##ConstructAbility, s_##name##InlineAttribute);\ }\ return m_unlinkedExecutables[index];\ } diff --git a/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinExecutables.h b/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinExecutables.h index c9f36aa7..9aca8d30 100644 --- 
a/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinExecutables.h +++ b/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinExecutables.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2014-2018 Apple Inc. All rights reserved. + * Copyright (C) 2014-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,6 +31,7 @@ #include "SourceCode.h" #include "Weak.h" #include "WeakHandleOwner.h" +#include namespace JSC { @@ -46,7 +47,7 @@ enum class BuiltinCodeIndex { #undef BUILTIN_NAME_ONLY class BuiltinExecutables { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(BuiltinExecutables); public: explicit BuiltinExecutables(VM&); @@ -60,14 +61,14 @@ SourceCode name##Source(); static SourceCode defaultConstructorSourceCode(ConstructorKind); UnlinkedFunctionExecutable* createDefaultConstructor(ConstructorKind, const Identifier& name, NeedsClassFieldInitializer, PrivateBrandRequirement); - static UnlinkedFunctionExecutable* createExecutable(VM&, const SourceCode&, const Identifier&, ImplementationVisibility, ConstructorKind, ConstructAbility, NeedsClassFieldInitializer, PrivateBrandRequirement = PrivateBrandRequirement::None); + static UnlinkedFunctionExecutable* createExecutable(VM&, const SourceCode&, const Identifier&, ImplementationVisibility, ConstructorKind, ConstructAbility, InlineAttribute, NeedsClassFieldInitializer, PrivateBrandRequirement = PrivateBrandRequirement::None); void finalizeUnconditionally(CollectionScope); private: VM& m_vm; - UnlinkedFunctionExecutable* createBuiltinExecutable(const SourceCode&, const Identifier&, ImplementationVisibility, ConstructorKind, ConstructAbility); + UnlinkedFunctionExecutable* createBuiltinExecutable(const SourceCode&, const Identifier&, ImplementationVisibility, ConstructorKind, ConstructAbility, InlineAttribute); Ref m_combinedSourceProvider; UnlinkedFunctionExecutable* m_unlinkedExecutables[static_cast(BuiltinCodeIndex::NumberOfBuiltinCodes)] { }; diff --git a/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinNames.cpp b/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinNames.cpp index b8b4fd29..2c0c2e2f 100644 --- a/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinNames.cpp +++ b/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinNames.cpp @@ -1,6 +1,6 @@ /* * Copyright (C) 2017 Yusuke Suzuki . - * Copyright (C) 2019 Apple Inc. All rights reserved. + * Copyright (C) 2019-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,6 +28,7 @@ #include "BuiltinNames.h" #include "IdentifierInlines.h" +#include #if COMPILER(MSVC) #pragma warning(push) @@ -71,6 +72,8 @@ SymbolImpl::StaticSymbolImpl polyProtoPrivateName { "PolyProto", SymbolImpl::s_f m_wellKnownSymbolsMap.add(m_##name##SymbolPrivateIdentifier.impl(), symbol); \ } while (0); +WTF_MAKE_TZONE_ALLOCATED_IMPL(BuiltinNames); + // We treat the dollarVM name as a special case below for $vm (because CommonIdentifiers does not // yet support the $ character). 
BuiltinNames::BuiltinNames(VM& vm, CommonIdentifiers* commonIdentifiers) diff --git a/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinNames.h b/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinNames.h index 1c7f7c64..86db87af 100644 --- a/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinNames.h +++ b/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinNames.h @@ -31,6 +31,7 @@ #include "JSCBuiltins.h" #include #include +#include namespace JSC { @@ -64,7 +65,6 @@ namespace JSC { macro(ShadowRealm) \ macro(RegExp) \ macro(min) \ - macro(trunc) \ macro(create) \ macro(defineProperty) \ macro(defaultPromiseThen) \ @@ -72,9 +72,7 @@ namespace JSC { macro(Map) \ macro(throwTypeErrorFunction) \ macro(typedArrayLength) \ - macro(typedArrayClone) \ macro(typedArrayContentType) \ - macro(typedArraySort) \ macro(typedArrayGetOriginalConstructor) \ macro(BuiltinLog) \ macro(BuiltinDescribe) \ @@ -119,8 +117,11 @@ namespace JSC { macro(nextMethod) \ macro(asyncGeneratorQueueItemNext) \ macro(this) \ + macro(toIntegerOrInfinity) \ + macro(toLength) \ macro(importMapStatus) \ macro(importInRealm) \ + macro(evalFunction) \ macro(evalInRealm) \ macro(moveFunctionToRealm) \ macro(newTargetLocal) \ @@ -129,7 +130,6 @@ namespace JSC { macro(isSharedTypedArrayView) \ macro(isResizableOrGrowableSharedTypedArrayView) \ macro(isDetached) \ - macro(typedArrayDefaultComparator) \ macro(typedArrayFromFast) \ macro(isBoundFunction) \ macro(hasInstanceBoundFunction) \ @@ -198,6 +198,7 @@ namespace JSC { macro(sentinelString) \ macro(createRemoteFunction) \ macro(isRemoteFunction) \ + macro(arrayFromFast) \ macro(arraySort) \ macro(jsonParse) \ macro(jsonStringify) \ @@ -227,7 +228,8 @@ extern JS_EXPORT_PRIVATE SymbolImpl::StaticSymbolImpl polyProtoPrivateName; } class BuiltinNames { - WTF_MAKE_NONCOPYABLE(BuiltinNames); WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_NONCOPYABLE(BuiltinNames); + WTF_MAKE_TZONE_ALLOCATED(BuiltinNames); public: using PrivateNameSet = MemoryCompactLookupOnlyRobinHoodHashSet; diff --git a/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinUtils.h b/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinUtils.h index 6f62ce27..a6871391 100644 --- a/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinUtils.h +++ b/vendor/webkit/Source/JavaScriptCore/builtins/BuiltinUtils.h @@ -29,6 +29,7 @@ #include "ConstructAbility.h" #include "ConstructorKind.h" #include "ImplementationVisibility.h" +#include "InlineAttribute.h" namespace JSC { @@ -43,6 +44,6 @@ class SourceCode; class UnlinkedFunctionExecutable; class VM; -JS_EXPORT_PRIVATE UnlinkedFunctionExecutable* createBuiltinExecutable(VM&, const SourceCode&, const Identifier&, ImplementationVisibility, ConstructorKind, ConstructAbility); +JS_EXPORT_PRIVATE UnlinkedFunctionExecutable* createBuiltinExecutable(VM&, const SourceCode&, const Identifier&, ImplementationVisibility, ConstructorKind, ConstructAbility, InlineAttribute); } // namespace JSC diff --git a/vendor/webkit/Source/JavaScriptCore/builtins/GlobalOperations.js b/vendor/webkit/Source/JavaScriptCore/builtins/GlobalOperations.js index 9eba3e6a..c594f85f 100644 --- a/vendor/webkit/Source/JavaScriptCore/builtins/GlobalOperations.js +++ b/vendor/webkit/Source/JavaScriptCore/builtins/GlobalOperations.js @@ -26,38 +26,6 @@ // @internal -@linkTimeConstant -function toIntegerOrInfinity(target) -{ - "use strict"; - - var numberValue = +target; - - // isNaN(numberValue) or 0 - if (numberValue !== numberValue || !numberValue) - return 0; - return @trunc(numberValue); -} - -@linkTimeConstant -function 
toLength(target) -{ - "use strict"; - - var length = @toIntegerOrInfinity(target); - // originally Math.min(Math.max(length, 0), maxSafeInteger)); - return +(length > 0 ? (length < @MAX_SAFE_INTEGER ? length : @MAX_SAFE_INTEGER) : 0); -} - -@linkTimeConstant -@getter -@overriddenName="get [Symbol.species]" -function speciesGetter() -{ - "use strict"; - return this; -} - @linkTimeConstant function speciesConstructor(obj, defaultConstructor) { diff --git a/vendor/webkit/Source/JavaScriptCore/builtins/MapConstructor.js b/vendor/webkit/Source/JavaScriptCore/builtins/MapConstructor.js index f3379ed4..0132ab50 100644 --- a/vendor/webkit/Source/JavaScriptCore/builtins/MapConstructor.js +++ b/vendor/webkit/Source/JavaScriptCore/builtins/MapConstructor.js @@ -36,7 +36,7 @@ function groupBy(items, callback) var groups = new @Map; var k = 0; for (var item of items) { - var key = callback.@call(@undefined, item, k, items); + var key = callback.@call(@undefined, item, k); var group = groups.@get(key); if (!group) { group = []; diff --git a/vendor/webkit/Source/JavaScriptCore/builtins/ObjectConstructor.js b/vendor/webkit/Source/JavaScriptCore/builtins/ObjectConstructor.js index abf2be5d..6b3d7038 100644 --- a/vendor/webkit/Source/JavaScriptCore/builtins/ObjectConstructor.js +++ b/vendor/webkit/Source/JavaScriptCore/builtins/ObjectConstructor.js @@ -56,7 +56,7 @@ function groupBy(items, callback) var groups = @Object.@create(null); var k = 0; for (var item of items) { - var key = @toPropertyKey(callback.@call(@undefined, item, k, items)); + var key = @toPropertyKey(callback.@call(@undefined, item, k)); var group = groups[key]; if (!group) { group = []; diff --git a/vendor/webkit/Source/JavaScriptCore/builtins/PromiseConstructor.js b/vendor/webkit/Source/JavaScriptCore/builtins/PromiseConstructor.js index 8e0d745a..4ec23847 100644 --- a/vendor/webkit/Source/JavaScriptCore/builtins/PromiseConstructor.js +++ b/vendor/webkit/Source/JavaScriptCore/builtins/PromiseConstructor.js @@ -368,6 +368,28 @@ function resolve(value) return @promiseResolve(this, value); } +function try(callback /*, ...args */) +{ + "use strict"; + + if (!@isObject(this)) + @throwTypeError("|this| is not an object"); + + var args = []; + for (var i = 1; i < arguments.length; i++) + @putByValDirect(args, i - 1, arguments[i]); + + var promiseCapability = @newPromiseCapability(this); + try { + var value = callback.@apply(@undefined, args); + promiseCapability.resolve.@call(@undefined, value); + } catch (error) { + promiseCapability.reject.@call(@undefined, error); + } + + return promiseCapability.promise; +} + function withResolvers() { "use strict"; diff --git a/vendor/webkit/Source/JavaScriptCore/builtins/PromiseOperations.js b/vendor/webkit/Source/JavaScriptCore/builtins/PromiseOperations.js index b495a1f9..1163be9b 100644 --- a/vendor/webkit/Source/JavaScriptCore/builtins/PromiseOperations.js +++ b/vendor/webkit/Source/JavaScriptCore/builtins/PromiseOperations.js @@ -382,7 +382,13 @@ function resolveWithoutPromiseForAsyncAwait(resolution, onFulfilled, onRejected, "use strict"; if (@isPromise(resolution)) { - var constructor = resolution.constructor; + try { + var { constructor } = resolution; + } catch (error) { + onRejected(error, context); + return; + } + if (constructor === @Promise || constructor === @InternalPromise) return @performPromiseThen(resolution, onFulfilled, onRejected, @undefined, context); } diff --git a/vendor/webkit/Source/JavaScriptCore/builtins/SetPrototype.js 
b/vendor/webkit/Source/JavaScriptCore/builtins/SetPrototype.js index 6620eda4..bad8924f 100644 --- a/vendor/webkit/Source/JavaScriptCore/builtins/SetPrototype.js +++ b/vendor/webkit/Source/JavaScriptCore/builtins/SetPrototype.js @@ -45,6 +45,25 @@ function forEach(callback /*, thisArg */) } while (true); } +// https://tc39.es/proposal-set-methods/#sec-getsetrecord (steps 1-7) +@linkTimeConstant +@alwaysInline +function getSetSizeAsInt(other) +{ + if (!@isObject(other)) + @throwTypeError("Set operation expects first argument to be an object"); + + var size = @toNumber(other.size); + if (size !== size) // is NaN? + @throwTypeError("Set operation expects first argument to have non-NaN 'size' property"); + + var sizeInt = @toIntegerOrInfinity(size); + if (sizeInt < 0) + @throwRangeError("Set operation expects first argument to have non-negative 'size' property"); + + return sizeInt; +} + function union(other) { "use strict"; @@ -53,12 +72,7 @@ function union(other) @throwTypeError("Set operation called on non-Set object"); // Get Set Record - if (!@isObject(other)) - @throwTypeError("Set.prototype.union expects the first parameter to be an object"); - var size = @toNumber(other.size); - // size is NaN - if (size !== size) - @throwTypeError("Set.prototype.union expects other.size to be a non-NaN number"); + var size = @getSetSizeAsInt(other); // unused but @getSetSizeAsInt call is observable var has = other.has; if (!@isCallable(has)) @@ -88,12 +102,7 @@ function intersection(other) @throwTypeError("Set operation called on non-Set object"); // Get Set Record - if (!@isObject(other)) - @throwTypeError("Set.prototype.intersection expects the first parameter to be an object"); - var size = @toNumber(other.size); - // size is NaN - if (size !== size) - @throwTypeError("Set.prototype.intersection expects other.size to be a non-NaN number"); + var size = @getSetSizeAsInt(other); var has = other.has; if (!@isCallable(has)) @@ -138,12 +147,7 @@ function difference(other) @throwTypeError("Set operation called on non-Set object"); // Get Set Record - if (!@isObject(other)) - @throwTypeError("Set.prototype.difference expects the first parameter to be an object"); - var size = @toNumber(other.size); - // size is NaN - if (size !== size) - @throwTypeError("Set.prototype.difference expects other.size to be a non-NaN number"); + var size = @getSetSizeAsInt(other); var has = other.has; if (!@isCallable(has)) @@ -188,12 +192,7 @@ function symmetricDifference(other) @throwTypeError("Set operation called on non-Set object"); // Get Set Record - if (!@isObject(other)) - @throwTypeError("Set.prototype.symmetricDifference expects the first parameter to be an object"); - var size = @toNumber(other.size); - // size is NaN - if (size !== size) - @throwTypeError("Set.prototype.symmetricDifference expects other.size to be a non-NaN number"); + var size = @getSetSizeAsInt(other); // unused but @getSetSizeAsInt call is observable var has = other.has; if (!@isCallable(has)) @@ -227,12 +226,7 @@ function isSubsetOf(other) @throwTypeError("Set operation called on non-Set object"); // Get Set Record - if (!@isObject(other)) - @throwTypeError("Set.prototype.isSubsetOf expects the first parameter to be an object"); - var size = @toNumber(other.size); - // size is NaN - if (size !== size) - @throwTypeError("Set.prototype.isSubsetOf expects other.size to be a non-NaN number"); + var size = @getSetSizeAsInt(other); var has = other.has; if (!@isCallable(has)) @@ -267,12 +261,7 @@ function isSupersetOf(other) @throwTypeError("Set 
operation called on non-Set object"); // Get Set Record - if (!@isObject(other)) - @throwTypeError("Set.prototype.isSupersetOf expects the first parameter to be an object"); - var size = @toNumber(other.size); - // size is NaN - if (size !== size) - @throwTypeError("Set.prototype.isSupersetOf expects other.size to be a non-NaN number"); + var size = @getSetSizeAsInt(other); var has = other.has; if (!@isCallable(has)) @@ -305,12 +294,7 @@ function isDisjointFrom(other) @throwTypeError("Set operation called on non-Set object"); // Get Set Record - if (!@isObject(other)) - @throwTypeError("Set.prototype.isDisjointFrom expects the first parameter to be an object"); - var size = @toNumber(other.size); - // size is NaN - if (size !== size) - @throwTypeError("Set.prototype.isDisjointFrom expects other.size to be a non-NaN number"); + var size = @getSetSizeAsInt(other); var has = other.has; if (!@isCallable(has)) diff --git a/vendor/webkit/Source/JavaScriptCore/builtins/TypedArrayPrototype.js b/vendor/webkit/Source/JavaScriptCore/builtins/TypedArrayPrototype.js index 7e211ed9..6805e11c 100644 --- a/vendor/webkit/Source/JavaScriptCore/builtins/TypedArrayPrototype.js +++ b/vendor/webkit/Source/JavaScriptCore/builtins/TypedArrayPrototype.js @@ -166,82 +166,6 @@ function some(callback /* [, thisArg] */) return false; } -@linkTimeConstant -function typedArrayMerge(dst, src, srcIndex, srcEnd, width, comparator) -{ - "use strict"; - - var left = srcIndex; - var leftEnd = @min(left + width, srcEnd); - var right = leftEnd; - var rightEnd = @min(right + width, srcEnd); - - for (var dstIndex = left; dstIndex < rightEnd; ++dstIndex) { - if (right < rightEnd) { - if (left >= leftEnd || @toNumber(comparator(src[right], src[left])) < 0) { - dst[dstIndex] = src[right++]; - continue; - } - } - - dst[dstIndex] = src[left++]; - } -} - -@linkTimeConstant -function typedArrayMergeSort(array, valueCount, comparator) -{ - "use strict"; - - var constructor = @typedArrayGetOriginalConstructor(array); - var buffer = new constructor(valueCount); - var dst = buffer; - var src = array; - if (@isResizableOrGrowableSharedTypedArrayView(array)) { - src = new constructor(valueCount); - for (var i = 0; i < valueCount; ++i) - src[i] = array[i]; - } - - for (var width = 1; width < valueCount; width *= 2) { - for (var srcIndex = 0; srcIndex < valueCount; srcIndex += 2 * width) - @typedArrayMerge(dst, src, srcIndex, valueCount, width, comparator); - - var tmp = src; - src = dst; - dst = tmp; - } - - if (src != array) { - valueCount = @min(@typedArrayLength(array), valueCount); - for (var i = 0; i < valueCount; ++i) - array[i] = src[i]; - } -} - -function sort(comparator) -{ - "use strict"; - - if (comparator !== @undefined && !@isCallable(comparator)) - @throwTypeError("TypedArray.prototype.sort requires the comparator argument to be a function or undefined"); - - var length = @typedArrayLength(this); - if (length < 2) - return this; - - // typedArraySort is not safe when the other thread is modifying content. So if |this| is SharedArrayBuffer, - // use JS-implemented sorting. - if (comparator !== @undefined || @isSharedTypedArrayView(this)) { - if (comparator === @undefined) - comparator = @typedArrayDefaultComparator; - @typedArrayMergeSort(this, length, comparator); - } else - @typedArraySort(this); - - return this; -} - function reduce(callback /* [, initialValue] */) { // 22.2.3.19 @@ -385,29 +309,3 @@ function at(index) return (k >= 0 && k < length) ? 
this[k] : @undefined; } - -function toSorted(comparator) -{ - "use strict"; - - // Step 1. - if (comparator !== @undefined && !@isCallable(comparator)) - @throwTypeError("TypedArray.prototype.toSorted requires the comparator argument to be a function or undefined"); - - var result = @typedArrayClone.@call(this); - - var length = @typedArrayLength(result); - if (length < 2) - return result; - - // typedArraySort is not safe when the other thread is modifying content. So if |result| is SharedArrayBuffer, - // use JS-implemented sorting. - if (comparator !== @undefined || @isSharedTypedArrayView(result)) { - if (comparator === @undefined) - comparator = @typedArrayDefaultComparator; - @typedArrayMergeSort(result, length, comparator); - } else - @typedArraySort(result); - - return result; -} diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/AccessCase.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/AccessCase.cpp index 4d78740d..c4f878a4 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/AccessCase.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/AccessCase.cpp @@ -52,10 +52,6 @@ namespace JSC { -namespace AccessCaseInternal { -static constexpr bool verbose = false; -} - DEFINE_ALLOCATOR_WITH_HEAP_IDENTIFIER(AccessCase); AccessCase::AccessCase(VM& vm, JSCell* owner, AccessType type, CacheableIdentifier identifier, PropertyOffset offset, Structure* structure, const ObjectPropertyConditionSet& conditionSet, RefPtr&& prototypeAccessChain) @@ -74,6 +70,7 @@ Ref AccessCase::create(VM& vm, JSCell* owner, AccessType type, Cache switch (type) { case LoadMegamorphic: case StoreMegamorphic: + case InMegamorphic: case InHit: case InMiss: case DeleteNonConfigurable: @@ -92,6 +89,7 @@ Ref AccessCase::create(VM& vm, JSCell* owner, AccessType type, Cache case IndexedProxyObjectLoad: case IndexedMegamorphicLoad: case IndexedMegamorphicStore: + case IndexedMegamorphicIn: case IndexedInt32Load: case IndexedDoubleLoad: case IndexedContiguousLoad: @@ -140,6 +138,32 @@ Ref AccessCase::create(VM& vm, JSCell* owner, AccessType type, Cache case IndexedResizableTypedArrayUint32Store: case IndexedResizableTypedArrayFloat32Store: case IndexedResizableTypedArrayFloat64Store: + case IndexedInt32InHit: + case IndexedDoubleInHit: + case IndexedContiguousInHit: + case IndexedArrayStorageInHit: + case IndexedScopedArgumentsInHit: + case IndexedDirectArgumentsInHit: + case IndexedTypedArrayInt8InHit: + case IndexedTypedArrayUint8InHit: + case IndexedTypedArrayUint8ClampedInHit: + case IndexedTypedArrayInt16InHit: + case IndexedTypedArrayUint16InHit: + case IndexedTypedArrayInt32InHit: + case IndexedTypedArrayUint32InHit: + case IndexedTypedArrayFloat32InHit: + case IndexedTypedArrayFloat64InHit: + case IndexedResizableTypedArrayInt8InHit: + case IndexedResizableTypedArrayUint8InHit: + case IndexedResizableTypedArrayUint8ClampedInHit: + case IndexedResizableTypedArrayInt16InHit: + case IndexedResizableTypedArrayUint16InHit: + case IndexedResizableTypedArrayInt32InHit: + case IndexedResizableTypedArrayUint32InHit: + case IndexedResizableTypedArrayFloat32InHit: + case IndexedResizableTypedArrayFloat64InHit: + case IndexedStringInHit: + case IndexedNoIndexingInMiss: RELEASE_ASSERT(!prototypeAccessChain); break; case Load: @@ -343,6 +367,7 @@ bool AccessCase::guardedByStructureCheckSkippingConstantIdentifierCheck() const switch (m_type) { case LoadMegamorphic: case StoreMegamorphic: + case InMegamorphic: case ArrayLength: case StringLength: case DirectArgumentsLength: @@ -357,6 +382,7 @@ bool 
AccessCase::guardedByStructureCheckSkippingConstantIdentifierCheck() const case IndexedProxyObjectLoad: case IndexedMegamorphicLoad: case IndexedMegamorphicStore: + case IndexedMegamorphicIn: case IndexedInt32Load: case IndexedDoubleLoad: case IndexedContiguousLoad: @@ -404,6 +430,31 @@ bool AccessCase::guardedByStructureCheckSkippingConstantIdentifierCheck() const case IndexedResizableTypedArrayUint32Store: case IndexedResizableTypedArrayFloat32Store: case IndexedResizableTypedArrayFloat64Store: + case IndexedInt32InHit: + case IndexedDoubleInHit: + case IndexedContiguousInHit: + case IndexedArrayStorageInHit: + case IndexedScopedArgumentsInHit: + case IndexedDirectArgumentsInHit: + case IndexedTypedArrayInt8InHit: + case IndexedTypedArrayUint8InHit: + case IndexedTypedArrayUint8ClampedInHit: + case IndexedTypedArrayInt16InHit: + case IndexedTypedArrayUint16InHit: + case IndexedTypedArrayInt32InHit: + case IndexedTypedArrayUint32InHit: + case IndexedTypedArrayFloat32InHit: + case IndexedTypedArrayFloat64InHit: + case IndexedResizableTypedArrayInt8InHit: + case IndexedResizableTypedArrayUint8InHit: + case IndexedResizableTypedArrayUint8ClampedInHit: + case IndexedResizableTypedArrayInt16InHit: + case IndexedResizableTypedArrayUint16InHit: + case IndexedResizableTypedArrayInt32InHit: + case IndexedResizableTypedArrayUint32InHit: + case IndexedResizableTypedArrayFloat32InHit: + case IndexedResizableTypedArrayFloat64InHit: + case IndexedStringInHit: return false; case Load: case Miss: @@ -412,6 +463,7 @@ bool AccessCase::guardedByStructureCheckSkippingConstantIdentifierCheck() const case DeleteMiss: case Replace: case IndexedNoIndexingMiss: + case IndexedNoIndexingInMiss: case Transition: case GetGetter: case Getter: @@ -436,6 +488,7 @@ bool AccessCase::requiresIdentifierNameMatch() const case Load: case LoadMegamorphic: case StoreMegamorphic: + case InMegamorphic: // We don't currently have a by_val for these puts, but we do care about the identifier. 
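
// Aside — a minimal sketch of the X-macro pattern that drives these exhaustive
// switches: AccessCase.h owns one macro list of access types, the enum is
// generated from it, and because the switches omit `default:`, adding an entry
// (like InMegamorphic or the Indexed*InHit cases in this hunk) makes -Wswitch
// flag every predicate that has not been updated. FOR_EACH_ACCESS_KIND and the
// enumerators below are illustrative stand-ins, not JSC's actual list.

#include <cstdio>

#define FOR_EACH_ACCESS_KIND(macro) \
    macro(Load) \
    macro(Store) \
    macro(InMegamorphic)

enum class AccessKind {
#define DEFINE_KIND(name) name,
    FOR_EACH_ACCESS_KIND(DEFINE_KIND)
#undef DEFINE_KIND
};

// No `default:` on purpose: a new macro(...) entry above turns into a
// compiler warning here until this switch handles it.
static const char* kindName(AccessKind kind)
{
    switch (kind) {
    case AccessKind::Load: return "Load";
    case AccessKind::Store: return "Store";
    case AccessKind::InMegamorphic: return "InMegamorphic";
    }
    return "unreachable";
}

int main()
{
    std::printf("%s\n", kindName(AccessKind::InMegamorphic));
}
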
case Transition: case Delete: @@ -470,6 +523,7 @@ bool AccessCase::requiresIdentifierNameMatch() const case IndexedProxyObjectLoad: case IndexedMegamorphicLoad: case IndexedMegamorphicStore: + case IndexedMegamorphicIn: case IndexedInt32Load: case IndexedDoubleLoad: case IndexedContiguousLoad: @@ -518,6 +572,32 @@ bool AccessCase::requiresIdentifierNameMatch() const case IndexedResizableTypedArrayUint32Store: case IndexedResizableTypedArrayFloat32Store: case IndexedResizableTypedArrayFloat64Store: + case IndexedInt32InHit: + case IndexedDoubleInHit: + case IndexedContiguousInHit: + case IndexedArrayStorageInHit: + case IndexedScopedArgumentsInHit: + case IndexedDirectArgumentsInHit: + case IndexedTypedArrayInt8InHit: + case IndexedTypedArrayUint8InHit: + case IndexedTypedArrayUint8ClampedInHit: + case IndexedTypedArrayInt16InHit: + case IndexedTypedArrayUint16InHit: + case IndexedTypedArrayInt32InHit: + case IndexedTypedArrayUint32InHit: + case IndexedTypedArrayFloat32InHit: + case IndexedTypedArrayFloat64InHit: + case IndexedResizableTypedArrayInt8InHit: + case IndexedResizableTypedArrayUint8InHit: + case IndexedResizableTypedArrayUint8ClampedInHit: + case IndexedResizableTypedArrayInt16InHit: + case IndexedResizableTypedArrayUint16InHit: + case IndexedResizableTypedArrayInt32InHit: + case IndexedResizableTypedArrayUint32InHit: + case IndexedResizableTypedArrayFloat32InHit: + case IndexedResizableTypedArrayFloat64InHit: + case IndexedStringInHit: + case IndexedNoIndexingInMiss: return false; } RELEASE_ASSERT_NOT_REACHED(); @@ -529,6 +609,7 @@ bool AccessCase::requiresInt32PropertyCheck() const case Load: case LoadMegamorphic: case StoreMegamorphic: + case InMegamorphic: case Transition: case Delete: case DeleteNonConfigurable: @@ -561,6 +642,7 @@ bool AccessCase::requiresInt32PropertyCheck() const case IndexedProxyObjectLoad: case IndexedMegamorphicLoad: case IndexedMegamorphicStore: + case IndexedMegamorphicIn: return false; case IndexedInt32Load: case IndexedDoubleLoad: @@ -610,99 +692,32 @@ bool AccessCase::requiresInt32PropertyCheck() const case IndexedResizableTypedArrayUint32Store: case IndexedResizableTypedArrayFloat32Store: case IndexedResizableTypedArrayFloat64Store: - return true; - } - RELEASE_ASSERT_NOT_REACHED(); -} - -bool AccessCase::needsScratchFPR() const -{ - switch (m_type) { - case Load: - case LoadMegamorphic: - case StoreMegamorphic: - case Transition: - case Delete: - case DeleteNonConfigurable: - case DeleteMiss: - case Replace: - case Miss: - case GetGetter: - case Getter: - case Setter: - case CustomValueGetter: - case CustomAccessorGetter: - case CustomValueSetter: - case CustomAccessorSetter: - case InHit: - case InMiss: - case CheckPrivateBrand: - case SetPrivateBrand: - case ArrayLength: - case StringLength: - case DirectArgumentsLength: - case ScopedArgumentsLength: - case ModuleNamespaceLoad: - case ProxyObjectHas: - case ProxyObjectLoad: - case ProxyObjectStore: - case InstanceOfHit: - case InstanceOfMiss: - case InstanceOfGeneric: - case IndexedProxyObjectLoad: - case IndexedMegamorphicLoad: - case IndexedMegamorphicStore: - case IndexedInt32Load: - case IndexedContiguousLoad: - case IndexedArrayStorageLoad: - case IndexedScopedArgumentsLoad: - case IndexedDirectArgumentsLoad: - case IndexedTypedArrayInt8Load: - case IndexedTypedArrayUint8Load: - case IndexedTypedArrayUint8ClampedLoad: - case IndexedTypedArrayInt16Load: - case IndexedTypedArrayUint16Load: - case IndexedTypedArrayInt32Load: - case IndexedResizableTypedArrayInt8Load: - case 
IndexedResizableTypedArrayUint8Load: - case IndexedResizableTypedArrayUint8ClampedLoad: - case IndexedResizableTypedArrayInt16Load: - case IndexedResizableTypedArrayUint16Load: - case IndexedResizableTypedArrayInt32Load: - case IndexedStringLoad: - case IndexedNoIndexingMiss: - case IndexedInt32Store: - case IndexedContiguousStore: - case IndexedArrayStorageStore: - case IndexedTypedArrayInt8Store: - case IndexedTypedArrayUint8Store: - case IndexedTypedArrayUint8ClampedStore: - case IndexedTypedArrayInt16Store: - case IndexedTypedArrayUint16Store: - case IndexedTypedArrayInt32Store: - case IndexedResizableTypedArrayInt8Store: - case IndexedResizableTypedArrayUint8Store: - case IndexedResizableTypedArrayUint8ClampedStore: - case IndexedResizableTypedArrayInt16Store: - case IndexedResizableTypedArrayUint16Store: - case IndexedResizableTypedArrayInt32Store: - return false; - case IndexedDoubleLoad: - case IndexedTypedArrayFloat32Load: - case IndexedTypedArrayFloat64Load: - case IndexedTypedArrayUint32Load: - case IndexedResizableTypedArrayFloat32Load: - case IndexedResizableTypedArrayFloat64Load: - case IndexedResizableTypedArrayUint32Load: - case IndexedDoubleStore: - case IndexedTypedArrayUint32Store: - case IndexedTypedArrayFloat32Store: - case IndexedTypedArrayFloat64Store: - case IndexedResizableTypedArrayUint32Store: - case IndexedResizableTypedArrayFloat32Store: - case IndexedResizableTypedArrayFloat64Store: - // Used by TypedArrayLength/TypedArrayByteOffset in the process of boxing their result as a double - case IntrinsicGetter: + case IndexedInt32InHit: + case IndexedDoubleInHit: + case IndexedContiguousInHit: + case IndexedArrayStorageInHit: + case IndexedScopedArgumentsInHit: + case IndexedDirectArgumentsInHit: + case IndexedTypedArrayInt8InHit: + case IndexedTypedArrayUint8InHit: + case IndexedTypedArrayUint8ClampedInHit: + case IndexedTypedArrayInt16InHit: + case IndexedTypedArrayUint16InHit: + case IndexedTypedArrayInt32InHit: + case IndexedTypedArrayUint32InHit: + case IndexedTypedArrayFloat32InHit: + case IndexedTypedArrayFloat64InHit: + case IndexedResizableTypedArrayInt8InHit: + case IndexedResizableTypedArrayUint8InHit: + case IndexedResizableTypedArrayUint8ClampedInHit: + case IndexedResizableTypedArrayInt16InHit: + case IndexedResizableTypedArrayUint16InHit: + case IndexedResizableTypedArrayInt32InHit: + case IndexedResizableTypedArrayUint32InHit: + case IndexedResizableTypedArrayFloat32InHit: + case IndexedResizableTypedArrayFloat64InHit: + case IndexedStringInHit: + case IndexedNoIndexingInMiss: return true; } RELEASE_ASSERT_NOT_REACHED(); @@ -767,6 +782,7 @@ void AccessCase::forEachDependentCell(VM&, const Functor& functor) const case Load: case LoadMegamorphic: case StoreMegamorphic: + case InMegamorphic: case Transition: case Delete: case DeleteNonConfigurable: @@ -785,6 +801,7 @@ void AccessCase::forEachDependentCell(VM&, const Functor& functor) const case InstanceOfGeneric: case IndexedMegamorphicLoad: case IndexedMegamorphicStore: + case IndexedMegamorphicIn: case IndexedInt32Load: case IndexedDoubleLoad: case IndexedContiguousLoad: @@ -833,6 +850,32 @@ void AccessCase::forEachDependentCell(VM&, const Functor& functor) const case IndexedResizableTypedArrayUint32Store: case IndexedResizableTypedArrayFloat32Store: case IndexedResizableTypedArrayFloat64Store: + case IndexedInt32InHit: + case IndexedDoubleInHit: + case IndexedContiguousInHit: + case IndexedArrayStorageInHit: + case IndexedScopedArgumentsInHit: + case IndexedDirectArgumentsInHit: + case 
IndexedTypedArrayInt8InHit: + case IndexedTypedArrayUint8InHit: + case IndexedTypedArrayUint8ClampedInHit: + case IndexedTypedArrayInt16InHit: + case IndexedTypedArrayUint16InHit: + case IndexedTypedArrayInt32InHit: + case IndexedTypedArrayUint32InHit: + case IndexedTypedArrayFloat32InHit: + case IndexedTypedArrayFloat64InHit: + case IndexedResizableTypedArrayInt8InHit: + case IndexedResizableTypedArrayUint8InHit: + case IndexedResizableTypedArrayUint8ClampedInHit: + case IndexedResizableTypedArrayInt16InHit: + case IndexedResizableTypedArrayUint16InHit: + case IndexedResizableTypedArrayInt32InHit: + case IndexedResizableTypedArrayUint32InHit: + case IndexedResizableTypedArrayFloat32InHit: + case IndexedResizableTypedArrayFloat64InHit: + case IndexedStringInHit: + case IndexedNoIndexingInMiss: break; } } @@ -866,6 +909,7 @@ bool AccessCase::doesCalls(VM& vm, Vector* cellsToMarkIfDoesCalls) cons case Load: case LoadMegamorphic: case StoreMegamorphic: + case InMegamorphic: case Miss: case GetGetter: case InHit: @@ -882,6 +926,7 @@ bool AccessCase::doesCalls(VM& vm, Vector* cellsToMarkIfDoesCalls) cons case InstanceOfGeneric: case IndexedMegamorphicLoad: case IndexedMegamorphicStore: + case IndexedMegamorphicIn: case IndexedInt32Load: case IndexedDoubleLoad: case IndexedContiguousLoad: @@ -930,6 +975,32 @@ bool AccessCase::doesCalls(VM& vm, Vector* cellsToMarkIfDoesCalls) cons case IndexedResizableTypedArrayUint32Store: case IndexedResizableTypedArrayFloat32Store: case IndexedResizableTypedArrayFloat64Store: + case IndexedInt32InHit: + case IndexedDoubleInHit: + case IndexedContiguousInHit: + case IndexedArrayStorageInHit: + case IndexedScopedArgumentsInHit: + case IndexedDirectArgumentsInHit: + case IndexedTypedArrayInt8InHit: + case IndexedTypedArrayUint8InHit: + case IndexedTypedArrayUint8ClampedInHit: + case IndexedTypedArrayInt16InHit: + case IndexedTypedArrayUint16InHit: + case IndexedTypedArrayInt32InHit: + case IndexedTypedArrayUint32InHit: + case IndexedTypedArrayFloat32InHit: + case IndexedTypedArrayFloat64InHit: + case IndexedResizableTypedArrayInt8InHit: + case IndexedResizableTypedArrayUint8InHit: + case IndexedResizableTypedArrayUint8ClampedInHit: + case IndexedResizableTypedArrayInt16InHit: + case IndexedResizableTypedArrayUint16InHit: + case IndexedResizableTypedArrayInt32InHit: + case IndexedResizableTypedArrayUint32InHit: + case IndexedResizableTypedArrayFloat32InHit: + case IndexedResizableTypedArrayFloat64InHit: + case IndexedStringInHit: + case IndexedNoIndexingInMiss: doesCalls = false; break; case Replace: @@ -993,8 +1064,10 @@ bool AccessCase::canReplace(const AccessCase& other) const switch (type()) { case LoadMegamorphic: case StoreMegamorphic: + case InMegamorphic: case IndexedMegamorphicLoad: case IndexedMegamorphicStore: + case IndexedMegamorphicIn: case IndexedInt32Load: case IndexedDoubleLoad: case IndexedContiguousLoad: @@ -1050,6 +1123,31 @@ bool AccessCase::canReplace(const AccessCase& other) const case ProxyObjectLoad: case ProxyObjectStore: case IndexedProxyObjectLoad: + case IndexedInt32InHit: + case IndexedDoubleInHit: + case IndexedContiguousInHit: + case IndexedArrayStorageInHit: + case IndexedScopedArgumentsInHit: + case IndexedDirectArgumentsInHit: + case IndexedTypedArrayInt8InHit: + case IndexedTypedArrayUint8InHit: + case IndexedTypedArrayUint8ClampedInHit: + case IndexedTypedArrayInt16InHit: + case IndexedTypedArrayUint16InHit: + case IndexedTypedArrayInt32InHit: + case IndexedTypedArrayUint32InHit: + case IndexedTypedArrayFloat32InHit: + case 
IndexedTypedArrayFloat64InHit: + case IndexedResizableTypedArrayInt8InHit: + case IndexedResizableTypedArrayUint8InHit: + case IndexedResizableTypedArrayUint8ClampedInHit: + case IndexedResizableTypedArrayInt16InHit: + case IndexedResizableTypedArrayUint16InHit: + case IndexedResizableTypedArrayInt32InHit: + case IndexedResizableTypedArrayUint32InHit: + case IndexedResizableTypedArrayFloat32InHit: + case IndexedResizableTypedArrayFloat64InHit: + case IndexedStringInHit: return other.type() == type(); case ModuleNamespaceLoad: { @@ -1099,6 +1197,7 @@ bool AccessCase::canReplace(const AccessCase& other) const case CheckPrivateBrand: case SetPrivateBrand: case IndexedNoIndexingMiss: + case IndexedNoIndexingInMiss: if (other.type() != type()) return false; @@ -1205,6 +1304,7 @@ inline void AccessCase::runWithDowncast(const Func& func) switch (m_type) { case LoadMegamorphic: case StoreMegamorphic: + case InMegamorphic: case Transition: case Delete: case DeleteNonConfigurable: @@ -1220,6 +1320,7 @@ inline void AccessCase::runWithDowncast(const Func& func) case SetPrivateBrand: case IndexedMegamorphicLoad: case IndexedMegamorphicStore: + case IndexedMegamorphicIn: case IndexedInt32Load: case IndexedDoubleLoad: case IndexedContiguousLoad: @@ -1268,6 +1369,32 @@ inline void AccessCase::runWithDowncast(const Func& func) case IndexedResizableTypedArrayFloat64Store: case IndexedStringLoad: case IndexedNoIndexingMiss: + case IndexedInt32InHit: + case IndexedDoubleInHit: + case IndexedContiguousInHit: + case IndexedArrayStorageInHit: + case IndexedScopedArgumentsInHit: + case IndexedDirectArgumentsInHit: + case IndexedTypedArrayInt8InHit: + case IndexedTypedArrayUint8InHit: + case IndexedTypedArrayUint8ClampedInHit: + case IndexedTypedArrayInt16InHit: + case IndexedTypedArrayUint16InHit: + case IndexedTypedArrayInt32InHit: + case IndexedTypedArrayUint32InHit: + case IndexedTypedArrayFloat32InHit: + case IndexedTypedArrayFloat64InHit: + case IndexedResizableTypedArrayInt8InHit: + case IndexedResizableTypedArrayUint8InHit: + case IndexedResizableTypedArrayUint8ClampedInHit: + case IndexedResizableTypedArrayInt16InHit: + case IndexedResizableTypedArrayUint16InHit: + case IndexedResizableTypedArrayInt32InHit: + case IndexedResizableTypedArrayUint32InHit: + case IndexedResizableTypedArrayFloat32InHit: + case IndexedResizableTypedArrayFloat64InHit: + case IndexedStringInHit: + case IndexedNoIndexingInMiss: case InstanceOfGeneric: func(static_cast(this)); break; @@ -1347,6 +1474,7 @@ bool AccessCase::canBeShared(const AccessCase& lhs, const AccessCase& rhs) case Load: case LoadMegamorphic: case StoreMegamorphic: + case InMegamorphic: case Transition: case Delete: case DeleteNonConfigurable: @@ -1364,6 +1492,7 @@ bool AccessCase::canBeShared(const AccessCase& lhs, const AccessCase& rhs) case SetPrivateBrand: case IndexedMegamorphicLoad: case IndexedMegamorphicStore: + case IndexedMegamorphicIn: case IndexedInt32Load: case IndexedDoubleLoad: case IndexedContiguousLoad: @@ -1412,6 +1541,32 @@ bool AccessCase::canBeShared(const AccessCase& lhs, const AccessCase& rhs) case IndexedResizableTypedArrayFloat64Store: case IndexedStringLoad: case IndexedNoIndexingMiss: + case IndexedInt32InHit: + case IndexedDoubleInHit: + case IndexedContiguousInHit: + case IndexedArrayStorageInHit: + case IndexedScopedArgumentsInHit: + case IndexedDirectArgumentsInHit: + case IndexedTypedArrayInt8InHit: + case IndexedTypedArrayUint8InHit: + case IndexedTypedArrayUint8ClampedInHit: + case IndexedTypedArrayInt16InHit: + case 
IndexedTypedArrayUint16InHit: + case IndexedTypedArrayInt32InHit: + case IndexedTypedArrayUint32InHit: + case IndexedTypedArrayFloat32InHit: + case IndexedTypedArrayFloat64InHit: + case IndexedResizableTypedArrayInt8InHit: + case IndexedResizableTypedArrayUint8InHit: + case IndexedResizableTypedArrayUint8ClampedInHit: + case IndexedResizableTypedArrayInt16InHit: + case IndexedResizableTypedArrayUint16InHit: + case IndexedResizableTypedArrayInt32InHit: + case IndexedResizableTypedArrayUint32InHit: + case IndexedResizableTypedArrayFloat32InHit: + case IndexedResizableTypedArrayFloat64InHit: + case IndexedStringInHit: + case IndexedNoIndexingInMiss: case InstanceOfGeneric: return true; diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/AccessCase.h b/vendor/webkit/Source/JavaScriptCore/bytecode/AccessCase.h index 6dff048a..167ff032 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/AccessCase.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/AccessCase.h @@ -47,6 +47,7 @@ class ModuleNamespaceAccessCase; class ProxyableAccessCase; class InlineCacheCompiler; +class InlineCacheHandler; DECLARE_ALLOCATOR_WITH_HEAP_IDENTIFIER(AccessCase); @@ -111,6 +112,7 @@ DECLARE_ALLOCATOR_WITH_HEAP_IDENTIFIER(AccessCase); macro(IntrinsicGetter) \ macro(InHit) \ macro(InMiss) \ + macro(InMegamorphic) \ macro(ArrayLength) \ macro(StringLength) \ macro(DirectArgumentsLength) \ @@ -175,7 +177,33 @@ DECLARE_ALLOCATOR_WITH_HEAP_IDENTIFIER(AccessCase); macro(IndexedResizableTypedArrayUint32Store) \ macro(IndexedResizableTypedArrayFloat32Store) \ macro(IndexedResizableTypedArrayFloat64Store) \ - + macro(IndexedInt32InHit) \ + macro(IndexedDoubleInHit) \ + macro(IndexedContiguousInHit) \ + macro(IndexedArrayStorageInHit) \ + macro(IndexedScopedArgumentsInHit) \ + macro(IndexedDirectArgumentsInHit) \ + macro(IndexedTypedArrayInt8InHit) \ + macro(IndexedTypedArrayUint8InHit) \ + macro(IndexedTypedArrayUint8ClampedInHit) \ + macro(IndexedTypedArrayInt16InHit) \ + macro(IndexedTypedArrayUint16InHit) \ + macro(IndexedTypedArrayInt32InHit) \ + macro(IndexedTypedArrayUint32InHit) \ + macro(IndexedTypedArrayFloat32InHit) \ + macro(IndexedTypedArrayFloat64InHit) \ + macro(IndexedResizableTypedArrayInt8InHit) \ + macro(IndexedResizableTypedArrayUint8InHit) \ + macro(IndexedResizableTypedArrayUint8ClampedInHit) \ + macro(IndexedResizableTypedArrayInt16InHit) \ + macro(IndexedResizableTypedArrayUint16InHit) \ + macro(IndexedResizableTypedArrayInt32InHit) \ + macro(IndexedResizableTypedArrayUint32InHit) \ + macro(IndexedResizableTypedArrayFloat32InHit) \ + macro(IndexedResizableTypedArrayFloat64InHit) \ + macro(IndexedStringInHit) \ + macro(IndexedNoIndexingInMiss) \ + macro(IndexedMegamorphicIn) \ class AccessCase : public ThreadSafeRefCounted { WTF_MAKE_FAST_ALLOCATED_WITH_HEAP_IDENTIFIER(AccessCase); @@ -293,7 +321,6 @@ class AccessCase : public ThreadSafeRefCounted { bool requiresIdentifierNameMatch() const; bool requiresInt32PropertyCheck() const; - bool needsScratchFPR() const; UniquedStringImpl* uid() const { return m_identifier.uid(); } CacheableIdentifier identifier() const { return m_identifier; } @@ -398,148 +425,6 @@ class AccessCase : public ThreadSafeRefCounted { CacheableIdentifier m_identifier; }; -class SharedJITStubSet { - WTF_MAKE_FAST_ALLOCATED(SharedJITStubSet); -public: - SharedJITStubSet() = default; - - struct Hash { - struct Key { - Key() = default; - - Key(GPRReg baseGPR, GPRReg valueGPR, GPRReg extraGPR, GPRReg extra2GPR, GPRReg stubInfoGPR, GPRReg arrayProfileGPR, ScalarRegisterSet 
usedRegisters, PolymorphicAccessJITStubRoutine* wrapped) - : m_wrapped(wrapped) - , m_baseGPR(baseGPR) - , m_valueGPR(valueGPR) - , m_extraGPR(extraGPR) - , m_extra2GPR(extra2GPR) - , m_stubInfoGPR(stubInfoGPR) - , m_arrayProfileGPR(arrayProfileGPR) - , m_usedRegisters(usedRegisters) - { } - - Key(WTF::HashTableDeletedValueType) - : m_wrapped(bitwise_cast(static_cast(1))) - { } - - bool isHashTableDeletedValue() const { return m_wrapped == bitwise_cast(static_cast(1)); } - - friend bool operator==(const Key&, const Key&) = default; - - PolymorphicAccessJITStubRoutine* m_wrapped { nullptr }; - GPRReg m_baseGPR; - GPRReg m_valueGPR; - GPRReg m_extraGPR; - GPRReg m_extra2GPR; - GPRReg m_stubInfoGPR; - GPRReg m_arrayProfileGPR; - ScalarRegisterSet m_usedRegisters; - }; - - using KeyTraits = SimpleClassHashTraits; - - static unsigned hash(const Key& p) - { - if (!p.m_wrapped) - return 1; - return p.m_wrapped->hash(); - } - - static bool equal(const Key& a, const Key& b) - { - return a == b; - } - - static constexpr bool safeToCompareToEmptyOrDeleted = false; - }; - - struct Searcher { - struct Translator { - static unsigned hash(const Searcher& searcher) - { - return PolymorphicAccessJITStubRoutine::computeHash(searcher.m_cases, searcher.m_weakStructures); - } - - static bool equal(const Hash::Key a, const Searcher& b) - { - if (a.m_baseGPR == b.m_baseGPR - && a.m_valueGPR == b.m_valueGPR - && a.m_extraGPR == b.m_extraGPR - && a.m_extra2GPR == b.m_extra2GPR - && a.m_stubInfoGPR == b.m_stubInfoGPR - && a.m_arrayProfileGPR == b.m_arrayProfileGPR - && a.m_usedRegisters == b.m_usedRegisters) { - // FIXME: The ordering of cases does not matter for sharing capabilities. - // We can potentially increase success rate by making this comparison / hashing non ordering sensitive. 
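
// Aside — the "translator" idea behind the SharedJITStubSet being removed
// here, in miniature: probe a hash set of heavyweight stored keys with a
// lightweight searcher that hashes and compares the same way, without ever
// materializing the stored type. WTF::HashSet spells this with translator
// structs; standard C++20 spells it as transparent hash/equality. The types
// below are illustrative, not the JSC ones.

#include <cstddef>
#include <functional>
#include <string>
#include <string_view>
#include <unordered_set>

struct TransparentHash {
    using is_transparent = void;
    size_t operator()(std::string_view s) const { return std::hash<std::string_view>{}(s); }
};

struct TransparentEq {
    using is_transparent = void;
    bool operator()(std::string_view a, std::string_view b) const { return a == b; }
};

int main()
{
    std::unordered_set<std::string, TransparentHash, TransparentEq> stubs;
    stubs.insert("stub-for-shape-42");

    // The "searcher": no std::string is constructed for the probe.
    std::string_view probe = "stub-for-shape-42";
    return stubs.find(probe) != stubs.end() ? 0 : 1;
}
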
- const auto& aCases = a.m_wrapped->cases(); - const auto& bCases = b.m_cases; - if (aCases.size() != bCases.size()) - return false; - for (unsigned index = 0; index < bCases.size(); ++index) { - if (!AccessCase::canBeShared(*aCases[index], *bCases[index])) - return false; - } - const auto& aWeak = a.m_wrapped->weakStructures(); - const auto& bWeak = b.m_weakStructures; - if (aWeak.size() != bWeak.size()) - return false; - for (unsigned i = 0, size = aWeak.size(); i < size; ++i) { - if (aWeak[i] != bWeak[i]) - return false; - } - return true; - } - return false; - } - }; - - GPRReg m_baseGPR; - GPRReg m_valueGPR; - GPRReg m_extraGPR; - GPRReg m_extra2GPR; - GPRReg m_stubInfoGPR; - GPRReg m_arrayProfileGPR; - ScalarRegisterSet m_usedRegisters; - const FixedVector>& m_cases; - const FixedVector& m_weakStructures; - }; - - struct PointerTranslator { - static unsigned hash(PolymorphicAccessJITStubRoutine* stub) - { - return stub->hash(); - } - - static bool equal(const Hash::Key& key, PolymorphicAccessJITStubRoutine* stub) - { - return key.m_wrapped == stub; - } - }; - - void add(Hash::Key&& key) - { - m_stubs.add(WTFMove(key)); - } - - void remove(PolymorphicAccessJITStubRoutine* stub) - { - auto iter = m_stubs.find(stub); - if (iter != m_stubs.end()) - m_stubs.remove(iter); - } - - PolymorphicAccessJITStubRoutine* find(const Searcher& searcher) - { - auto entry = m_stubs.find(searcher); - if (entry != m_stubs.end()) - return entry->m_wrapped; - return nullptr; - } - -private: - HashSet m_stubs; -}; - } // namespace JSC #endif diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/AccessCaseSnippetParams.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/AccessCaseSnippetParams.cpp index 4910878a..04824920 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/AccessCaseSnippetParams.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/AccessCaseSnippetParams.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016-2018 Apple Inc. All rights reserved. + * Copyright (C) 2016-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -53,21 +53,11 @@ class SlowPathCallGeneratorWithArguments final : public AccessCaseSnippetParams: // We spill (1) the used registers by IC and (2) the used registers by Snippet. InlineCacheCompiler::SpillState spillState = compiler.preserveLiveRegistersToStackForCall(usedRegistersBySnippet.buildAndValidate()); - jit.store32( - CCallHelpers::TrustedImm32(compiler.callSiteIndexForExceptionHandlingOrOriginal().bits()), - CCallHelpers::tagFor(CallFrameSlot::argumentCountIncludingThis)); - jit.makeSpaceOnStackForCCall(); jit.setupArguments(std::get(m_arguments)...); jit.prepareCallOperation(compiler.vm()); - - CCallHelpers::Call operationCall = jit.call(OperationPtrTag); - auto function = m_function; - jit.addLinkTask([=] (LinkBuffer& linkBuffer) { - linkBuffer.link(operationCall, function); - }); - + jit.callOperation(m_function); jit.setupResults(m_result); jit.reclaimSpaceOnStackForCCall(); diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/AccessCaseSnippetParams.h b/vendor/webkit/Source/JavaScriptCore/bytecode/AccessCaseSnippetParams.h index 1e2f4719..3b8af7f6 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/AccessCaseSnippetParams.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/AccessCaseSnippetParams.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016 Apple Inc. All rights reserved. + * Copyright (C) 2016-2023 Apple Inc. 
All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,6 +30,7 @@ #if ENABLE(JIT) #include "SnippetParams.h" +#include namespace JSC { diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp index b53edfb1..4efa1853 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2017 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -27,9 +27,12 @@ #include "AdaptiveInferredPropertyValueWatchpointBase.h" #include "JSCInlines.h" +#include namespace JSC { +WTF_MAKE_TZONE_ALLOCATED_IMPL(AdaptiveInferredPropertyValueWatchpointBase); + AdaptiveInferredPropertyValueWatchpointBase::AdaptiveInferredPropertyValueWatchpointBase(const ObjectPropertyCondition& key) : m_key(key) { diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h b/vendor/webkit/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h index ce4260f7..1ea97bc3 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2017 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -27,8 +27,8 @@ #include "ObjectPropertyCondition.h" #include "Watchpoint.h" -#include #include +#include namespace JSC { @@ -36,7 +36,7 @@ namespace JSC { // https://bugs.webkit.org/show_bug.cgi?id=202381 class AdaptiveInferredPropertyValueWatchpointBase { WTF_MAKE_NONCOPYABLE(AdaptiveInferredPropertyValueWatchpointBase); - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(AdaptiveInferredPropertyValueWatchpointBase); public: AdaptiveInferredPropertyValueWatchpointBase(const ObjectPropertyCondition&); diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/ArithProfile.h b/vendor/webkit/Source/JavaScriptCore/bytecode/ArithProfile.h index 0a4c2e8b..ebff5c94 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/ArithProfile.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/ArithProfile.h @@ -53,7 +53,7 @@ struct ObservedType { constexpr ObservedType withNonNumber() const { return ObservedType(m_bits | TypeNonNumber); } constexpr ObservedType withoutNonNumber() const { return ObservedType(m_bits & ~TypeNonNumber); } - friend constexpr bool operator==(ObservedType, ObservedType) = default; + friend constexpr bool operator==(const ObservedType&, const ObservedType&) = default; static constexpr uint8_t TypeEmpty = 0x0; static constexpr uint8_t TypeInt32 = 0x1; diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h b/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h index d7643d19..e7e5e11c 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * Copyright (C) 2013-2023 Apple Inc. All rights reserved. 
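
// Aside — the declare/define split behind the WTF_MAKE_TZONE_ALLOCATED /
// WTF_MAKE_TZONE_ALLOCATED_IMPL pairs replacing WTF_MAKE_FAST_ALLOCATED in
// these files: the header-side macro declares class-specific operator
// new/delete so instances come from a per-type pool, and the .cpp-side macro
// supplies the single out-of-line definition. Pool and the macro names below
// are stand-ins, not WTF's implementation.

#include <cstddef>
#include <new>

struct Pool {
    // Real TZone heaps segregate allocations by type; global new stands in here.
    static void* allocate(size_t size) { return ::operator new(size); }
    static void deallocate(void* p) { ::operator delete(p); }
};

#define MAKE_POOL_ALLOCATED(Type) \
public: \
    void* operator new(size_t); \
    void operator delete(void*)

#define MAKE_POOL_ALLOCATED_IMPL(Type) \
    void* Type::operator new(size_t size) { return Pool::allocate(size); } \
    void Type::operator delete(void* p) { Pool::deallocate(p); }

class Watchpoint {
    MAKE_POOL_ALLOCATED(Watchpoint);
};

MAKE_POOL_ALLOCATED_IMPL(Watchpoint)

int main()
{
    delete new Watchpoint; // routed through the pool, not global new/delete
}
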
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -43,7 +43,7 @@ DECLARE_ALLOCATOR_WITH_HEAP_IDENTIFIER(BytecodeBasicBlock); template class BytecodeBasicBlock { - WTF_MAKE_FAST_ALLOCATED(BytecodeBasicBlock); + WTF_MAKE_TZONE_ALLOCATED(BytecodeBasicBlock); WTF_MAKE_NONCOPYABLE(BytecodeBasicBlock); friend class BytecodeGraph; public: diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeDumper.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeDumper.cpp index a8375b19..9494059a 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeDumper.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeDumper.cpp @@ -279,8 +279,7 @@ void CodeBlockBytecodeDumper::dumpGraph(Block* block, const JSInstruction out.printf("\n"); - Vector> predecessors; - predecessors.resize(graph.size()); + Vector> predecessors(graph.size()); for (auto& block : graph) { if (block.isEntryBlock() || block.isExitBlock()) continue; diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp index e96c1951..1d7ade50 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp @@ -78,7 +78,7 @@ class BytecodeGeneratorification { auto bytecode = instruction->as(); unsigned liveCalleeLocalsIndex = bytecode.m_yieldPoint; if (liveCalleeLocalsIndex >= m_yields.size()) - m_yields.resize(liveCalleeLocalsIndex + 1); + m_yields.grow(liveCalleeLocalsIndex + 1); YieldData& data = m_yields[liveCalleeLocalsIndex]; data.point = instruction.offset(); data.argument = bytecode.m_argument; @@ -147,7 +147,7 @@ class BytecodeGeneratorification { // It means that, the register can be retrieved even if the immediate previous op_save does not save it. if (m_storages.size() <= index) - m_storages.resize(index + 1); + m_storages.grow(index + 1); if (std::optional storage = m_storages[index]) return *storage; diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeGraph.h b/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeGraph.h index d283cb65..d179e1e3 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeGraph.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeGraph.h @@ -1,6 +1,6 @@ /* * Copyright (C) 2016 Yusuke Suzuki - * Copyright (C) 2016 Apple Inc. All rights reserved. + * Copyright (C) 2016-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,13 +30,14 @@ #include "BytecodeDumper.h" #include #include +#include #include namespace JSC { class BytecodeGraph { - WTF_MAKE_FAST_ALLOCATED; WTF_MAKE_NONCOPYABLE(BytecodeGraph); + WTF_MAKE_TZONE_ALLOCATED(BytecodeGraph); public: using BasicBlockType = JSBytecodeBasicBlock; using BasicBlocksVector = typename BasicBlockType::BasicBlockVector; diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp index ea974ea7..98bd46d8 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp @@ -1,6 +1,6 @@ /* * Copyright (C) 2015 Yusuke Suzuki . - * Copyright (C) 2016-2019 Apple Inc. 
All rights reserved. + * Copyright (C) 2016-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -45,9 +45,12 @@ #include "LinkTimeConstant.h" #include "Nodes.h" #include "StrongInlines.h" +#include namespace JSC { +WTF_MAKE_TZONE_ALLOCATED_IMPL(BytecodeIntrinsicRegistry); + #define INITIALIZE_BYTECODE_INTRINSIC_NAMES_TO_SET(name) m_bytecodeIntrinsicMap.add(vm.propertyNames->builtinNames().name##PrivateName().impl(), Entry(&BytecodeIntrinsicNode::emit_intrinsic_##name)); #define INITIALIZE_BYTECODE_INTRINSIC_NAMES_TO_SET_FOR_LINK_TIME_CONSTANT(name, code) m_bytecodeIntrinsicMap.add(vm.propertyNames->builtinNames().name##PrivateName().impl(), JSC::LinkTimeConstant::name); diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h b/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h index 01f190d8..1a68778f 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h @@ -1,6 +1,6 @@ /* * Copyright (C) 2015 Yusuke Suzuki . - * Copyright (C) 2016-2017 Apple Inc. All rights reserved. + * Copyright (C) 2016-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,6 +29,7 @@ #include "Identifier.h" #include #include +#include namespace JSC { @@ -173,8 +174,8 @@ enum class LinkTimeConstant : int32_t; macro(sentinelSetBucket) \ class BytecodeIntrinsicRegistry { - WTF_MAKE_FAST_ALLOCATED; WTF_MAKE_NONCOPYABLE(BytecodeIntrinsicRegistry); + WTF_MAKE_TZONE_ALLOCATED(BytecodeIntrinsicRegistry); public: explicit BytecodeIntrinsicRegistry(VM&); diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeList.rb b/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeList.rb index a200263f..d9163358 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeList.rb +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeList.rb @@ -1396,7 +1396,7 @@ op :llint_native_construct_trampoline op :llint_internal_function_call_trampoline op :llint_internal_function_construct_trampoline -op :llint_link_call_trampoline +op :llint_default_call_trampoline op :llint_virtual_call_trampoline op :llint_virtual_construct_trampoline op :llint_virtual_tail_call_trampoline @@ -1412,27 +1412,16 @@ op :op_construct_return_location op :op_call_varargs_return_location op :op_construct_varargs_return_location -op :op_call_varargs_slow_return_location -op :op_construct_varargs_slow_return_location op :op_get_by_id_return_location op :op_get_by_val_return_location op :op_put_by_id_return_location op :op_put_by_val_return_location op :op_iterator_open_return_location op :op_iterator_next_return_location +op :op_call_direct_eval_slow_return_location op :wasm_function_prologue op :wasm_function_prologue_simd -op :op_call_slow_return_location -op :op_call_ignore_result_slow_return_location -op :op_construct_slow_return_location -op :op_iterator_open_slow_return_location -op :op_iterator_next_slow_return_location -op :op_tail_call_slow_return_location -op :op_tail_call_forward_arguments_slow_return_location -op :op_tail_call_varargs_slow_return_location -op :op_call_direct_eval_slow_return_location - op :js_trampoline_op_call op :js_trampoline_op_call_ignore_result op :js_trampoline_op_construct @@ -1440,17 +1429,7 @@ op 
:js_trampoline_op_construct_varargs op :js_trampoline_op_iterator_next op :js_trampoline_op_iterator_open -op :js_trampoline_op_call_slow -op :js_trampoline_op_call_ignore_result_slow -op :js_trampoline_op_tail_call_slow -op :js_trampoline_op_construct_slow -op :js_trampoline_op_call_varargs_slow -op :js_trampoline_op_tail_call_varargs_slow -op :js_trampoline_op_tail_call_forward_arguments_slow -op :js_trampoline_op_construct_varargs_slow op :js_trampoline_op_call_direct_eval_slow -op :js_trampoline_op_iterator_next_slow -op :js_trampoline_op_iterator_open_slow op :js_trampoline_llint_function_for_call_arity_check_untag op :js_trampoline_llint_function_for_call_arity_check_tag op :js_trampoline_llint_function_for_construct_arity_check_untag @@ -1493,36 +1472,6 @@ op :llint_cloop_did_return_from_js_23 op :llint_cloop_did_return_from_js_24 op :llint_cloop_did_return_from_js_25 -op :llint_cloop_did_return_from_js_26 -op :llint_cloop_did_return_from_js_27 -op :llint_cloop_did_return_from_js_28 -op :llint_cloop_did_return_from_js_29 -op :llint_cloop_did_return_from_js_30 -op :llint_cloop_did_return_from_js_31 -op :llint_cloop_did_return_from_js_32 -op :llint_cloop_did_return_from_js_33 -op :llint_cloop_did_return_from_js_34 -op :llint_cloop_did_return_from_js_35 -op :llint_cloop_did_return_from_js_36 -op :llint_cloop_did_return_from_js_37 -op :llint_cloop_did_return_from_js_38 -op :llint_cloop_did_return_from_js_39 -op :llint_cloop_did_return_from_js_40 -op :llint_cloop_did_return_from_js_41 -op :llint_cloop_did_return_from_js_42 -op :llint_cloop_did_return_from_js_43 -op :llint_cloop_did_return_from_js_44 -op :llint_cloop_did_return_from_js_45 -op :llint_cloop_did_return_from_js_46 -op :llint_cloop_did_return_from_js_47 -op :llint_cloop_did_return_from_js_48 -op :llint_cloop_did_return_from_js_49 -op :llint_cloop_did_return_from_js_50 -op :llint_cloop_did_return_from_js_51 -op :llint_cloop_did_return_from_js_52 -op :llint_cloop_did_return_from_js_53 -op :llint_cloop_did_return_from_js_54 -op :llint_cloop_did_return_from_js_55 end_section :CLoopReturnHelpers @@ -1874,7 +1823,7 @@ exception: VirtualRegister, } -op :i31_new, +op :ref_i31, args: { dst: VirtualRegister, value: VirtualRegister, @@ -1919,6 +1868,15 @@ arrayref: VirtualRegister, } +op :array_fill, + args: { + arrayref: VirtualRegister, + offset: VirtualRegister, + value: VirtualRegister, + size: VirtualRegister, + typeIndex: unsigned, + } + op :struct_new, args: { dst: VirtualRegister, @@ -1932,6 +1890,7 @@ dst: VirtualRegister, structReference: VirtualRegister, fieldIndex: unsigned, + structGetKind: unsigned, } op :struct_set, @@ -1941,7 +1900,7 @@ value: VirtualRegister, } -op :extern_externalize, +op :extern_convert_any, args: { dst: VirtualRegister, reference: VirtualRegister, diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp index c0b497d9..9178ae9b 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp @@ -31,9 +31,13 @@ #include "CodeBlock.h" #include "FullBytecodeLiveness.h" #include "JSCJSValueInlines.h" +#include namespace JSC { +WTF_MAKE_TZONE_ALLOCATED_IMPL(BytecodeLivenessAnalysis); +WTF_MAKE_TZONE_ALLOCATED_IMPL(FullBytecodeLiveness); + BytecodeLivenessAnalysis::BytecodeLivenessAnalysis(CodeBlock* codeBlock) : m_graph(codeBlock, codeBlock->instructions()) { diff --git 
a/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h b/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h index 5d275b2e..c358f7f3 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h @@ -77,7 +77,7 @@ class BytecodeLivenessPropagation { }; class BytecodeLivenessAnalysis : private BytecodeLivenessPropagation { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(BytecodeLivenessAnalysis); WTF_MAKE_NONCOPYABLE(BytecodeLivenessAnalysis); public: friend class BytecodeLivenessPropagation; diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeRewriter.h b/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeRewriter.h index 703d6abb..c130d413 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeRewriter.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/BytecodeRewriter.h @@ -1,6 +1,6 @@ /* * Copyright (C) 2016 Yusuke Suzuki - * Copyright (C) 2016 Apple Inc. All rights reserved. + * Copyright (C) 2016-2024 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -226,6 +226,9 @@ WTF_MAKE_NONCOPYABLE(BytecodeRewriter); void adjustJumpTargets(); + template + void forEachLabelPoint(Func); + private: void insertImpl(InsertionPoint, IncludeBranch, JSInstructionStreamWriter&& fragment); @@ -256,4 +259,18 @@ inline int BytecodeRewriter::calculateDifference(Iterator begin, Iterator end) return result; } +template +void BytecodeRewriter::forEachLabelPoint(Func func) +{ + int32_t previousBytecodeOffset = -1; + for (size_t i = 0; i < m_insertions.size(); ++i) { + Insertion& insertion = m_insertions[i]; + int32_t bytecodeOffset = insertion.index.bytecodeOffset; + if (bytecodeOffset == previousBytecodeOffset) + continue; + previousBytecodeOffset = bytecodeOffset; + func(bytecodeOffset); + } +} + } // namespace JSC diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp index 33a22193..5fbfd18f 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp @@ -31,7 +31,10 @@ #include "DFGJITCode.h" #include "DisallowMacroScratchRegisterUsage.h" #include "FunctionCodeBlock.h" +#include "JITThunks.h" #include "JSCellInlines.h" +#include "JSWebAssemblyModule.h" +#include "LLIntEntrypoint.h" #include "LinkBuffer.h" #include "Opcode.h" #include "Repatch.h" @@ -76,9 +79,6 @@ CallLinkInfo::CallType CallLinkInfo::callTypeFor(OpcodeID opcodeID) CallLinkInfo::~CallLinkInfo() { clearStub(); - - if (isOnList()) - remove(); } void CallLinkInfo::clearStub() @@ -92,12 +92,24 @@ void CallLinkInfo::clearStub() #endif } -void CallLinkInfo::unlink(VM& vm) +void CallLinkInfo::unlinkOrUpgradeImpl(VM& vm, CodeBlock* oldCodeBlock, CodeBlock* newCodeBlock) { // We could be called even if we're not linked anymore because of how polymorphic calls // work. Each callsite within the polymorphic call stub may separately ask us to unlink(). - if (isLinked()) - unlinkCall(vm, *this); + if (isLinked()) { + if (newCodeBlock && isDataIC() && mode() == Mode::Monomorphic && oldCodeBlock == u.dataIC.m_codeBlock) { + // Upgrading Monomorphic DataIC with newCodeBlock. 
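
// Aside — the shape of the upgrade performed at this step, in miniature: a
// data IC caches (callee, codeBlock, call target) in mutable fields, so when
// the callee's code is replaced (e.g. by tier-up) the cache can be retargeted
// in place instead of being unlinked and relinked from scratch. CachedCall and
// CodeBlockStub are illustrative stand-ins for the CallLinkInfo fields above,
// not JSC types.

#include <cstdio>

struct CodeBlockStub { void (*entry)(); };

struct CachedCall {
    void* callee = nullptr;              // who we expect to call
    CodeBlockStub* codeBlock = nullptr;  // where its code currently lives
    void (*callTarget)() = nullptr;      // resolved entry point

    // Retarget in place iff the cache still points at the code being replaced.
    void upgrade(CodeBlockStub* oldBlock, CodeBlockStub* newBlock)
    {
        if (codeBlock != oldBlock)
            return; // cache is for someone else; leave it alone
        codeBlock = newBlock;
        callTarget = newBlock->entry;
    }
};

static void slowEntry() { std::puts("baseline"); }
static void fastEntry() { std::puts("optimized"); }

int main()
{
    CodeBlockStub baseline { slowEntry }, optimized { fastEntry };
    CachedCall ic { nullptr, &baseline, baseline.entry };
    ic.upgrade(&baseline, &optimized); // same callee, better code
    ic.callTarget();
}
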
+ remove(); + ArityCheckMode arityCheck = oldCodeBlock->jitCode()->addressForCall(ArityCheckNotRequired) == u.dataIC.m_monomorphicCallDestination ? ArityCheckNotRequired : MustCheckArity; + auto target = newCodeBlock->jitCode()->addressForCall(arityCheck); + u.dataIC.m_codeBlock = newCodeBlock; + u.dataIC.m_monomorphicCallDestination = target; + newCodeBlock->linkIncomingCall(nullptr, this); // This is just relinking. So owner and caller frame can be nullptr. + return; + } + dataLogLnIf(Options::dumpDisassembly(), "Unlinking CallLinkInfo: ", RawPointer(this)); + revertCall(vm); + } // Either we were unlinked, in which case we should not have been on any list, or we unlinked // ourselves so that we're not on any list anymore. @@ -106,15 +118,13 @@ void CallLinkInfo::unlink(VM& vm) CodeLocationLabel CallLinkInfo::doneLocation() { - RELEASE_ASSERT(!isDirect()); return m_doneLocation; } void CallLinkInfo::setMonomorphicCallee(VM& vm, JSCell* owner, JSObject* callee, CodeBlock* codeBlock, CodePtr codePtr) { - RELEASE_ASSERT(!isDirect()); RELEASE_ASSERT(!(bitwise_cast(callee) & polymorphicCalleeMask)); - m_calleeOrCodeBlock.set(vm, owner, callee); + m_callee.set(vm, owner, callee); if (isDataIC()) { u.dataIC.m_codeBlock = codeBlock; @@ -128,12 +138,12 @@ void CallLinkInfo::setMonomorphicCallee(VM& vm, JSCell* owner, JSObject* callee, RELEASE_ASSERT_NOT_REACHED(); #endif } + m_mode = static_cast(Mode::Monomorphic); } void CallLinkInfo::clearCallee() { - RELEASE_ASSERT(!isDirect()); - m_calleeOrCodeBlock.clear(); + m_callee.clear(); if (isDataIC()) { u.dataIC.m_codeBlock = nullptr; u.dataIC.m_monomorphicCallDestination = nullptr; @@ -149,63 +159,23 @@ void CallLinkInfo::clearCallee() JSObject* CallLinkInfo::callee() { - RELEASE_ASSERT(!isDirect()); - RELEASE_ASSERT(!(bitwise_cast(m_calleeOrCodeBlock.get()) & polymorphicCalleeMask)); - return jsCast(m_calleeOrCodeBlock.get()); -} - -void CallLinkInfo::setCodeBlock(VM& vm, JSCell* owner, FunctionCodeBlock* codeBlock) -{ - RELEASE_ASSERT(isDirect()); - m_calleeOrCodeBlock.setMayBeNull(vm, owner, codeBlock); -} - -void CallLinkInfo::clearCodeBlock() -{ - RELEASE_ASSERT(isDirect()); - m_calleeOrCodeBlock.clear(); -} - -FunctionCodeBlock* CallLinkInfo::codeBlock() -{ - RELEASE_ASSERT(isDirect()); - return jsCast(m_calleeOrCodeBlock.get()); + RELEASE_ASSERT(!(bitwise_cast(m_callee.get()) & polymorphicCalleeMask)); + return m_callee.get(); } void CallLinkInfo::setLastSeenCallee(VM& vm, const JSCell* owner, JSObject* callee) { - RELEASE_ASSERT(!isDirect()); - m_lastSeenCalleeOrExecutable.set(vm, owner, callee); -} - -void CallLinkInfo::clearLastSeenCallee() -{ - RELEASE_ASSERT(!isDirect()); - m_lastSeenCalleeOrExecutable.clear(); + m_lastSeenCallee.set(vm, owner, callee); } JSObject* CallLinkInfo::lastSeenCallee() const { - RELEASE_ASSERT(!isDirect()); - return jsCast(m_lastSeenCalleeOrExecutable.get()); + return m_lastSeenCallee.get(); } bool CallLinkInfo::haveLastSeenCallee() const { - RELEASE_ASSERT(!isDirect()); - return !!m_lastSeenCalleeOrExecutable; -} - -void CallLinkInfo::setExecutableDuringCompilation(ExecutableBase* executable) -{ - RELEASE_ASSERT(isDirect()); - m_lastSeenCalleeOrExecutable.setWithoutWriteBarrier(executable); -} - -ExecutableBase* CallLinkInfo::executable() -{ - RELEASE_ASSERT(isDirect()); - return jsCast(m_lastSeenCalleeOrExecutable.get()); + return !!m_lastSeenCallee; } void CallLinkInfo::visitWeak(VM& vm) @@ -217,73 +187,49 @@ void CallLinkInfo::visitWeak(VM& vm) m_clearedByGC = true; }; - if (isLinked()) { + switch (mode()) 
{ + case Mode::Init: + case Mode::Virtual: + break; + case Mode::Polymorphic: { if (stub()) { #if ENABLE(JIT) if (!stub()->visitWeak(vm)) { - if (UNLIKELY(Options::verboseOSR())) { - dataLog( - "At ", codeOrigin(), ", ", RawPointer(this), ": clearing call stub to ", - listDump(stub()->variants()), ", stub routine ", RawPointer(stub()), - ".\n"); - } - unlink(vm); + dataLogLnIf(Options::verboseOSR(), "At ", codeOrigin(), ", ", RawPointer(this), ": clearing call stub to ", listDump(stub()->variants()), ", stub routine ", RawPointer(stub()), "."); + unlinkOrUpgrade(vm, nullptr, nullptr); m_clearedByGC = true; } #else RELEASE_ASSERT_NOT_REACHED(); #endif - } else if (!vm.heap.isMarked(m_calleeOrCodeBlock.get())) { - if (isDirect()) { - if (UNLIKELY(Options::verboseOSR())) { - dataLog( - "Clearing call to ", RawPointer(codeBlock()), " (", - pointerDump(codeBlock()), ").\n"); - } + } + break; + } + case Mode::Monomorphic: { + auto* callee = m_callee.get(); + if (callee && !vm.heap.isMarked(callee)) { + if (callee->type() == JSFunctionType) { + dataLogLnIf(Options::verboseOSR(), "Clearing call to ", RawPointer(callee), " (", static_cast(callee)->executable()->hashFor(specializationKind()), ")."); + handleSpecificCallee(static_cast(callee)); } else { - JSObject* callee = jsCast(m_calleeOrCodeBlock.get()); - if (callee->type() == JSFunctionType) { - if (UNLIKELY(Options::verboseOSR())) { - dataLog( - "Clearing call to ", - RawPointer(callee), " (", - static_cast(callee)->executable()->hashFor(specializationKind()), - ").\n"); - } - handleSpecificCallee(static_cast(callee)); - } else { - if (UNLIKELY(Options::verboseOSR())) - dataLog("Clearing call to ", RawPointer(callee), ".\n"); - m_clearedByGC = true; - } - } - unlink(vm); - } else if (isDirect() && !vm.heap.isMarked(m_lastSeenCalleeOrExecutable.get())) { - if (UNLIKELY(Options::verboseOSR())) { - dataLog( - "Clearing call to ", RawPointer(executable()), - " because the executable is dead.\n"); + dataLogLnIf(Options::verboseOSR(), "Clearing call to ", RawPointer(callee), "."); + m_clearedByGC = true; } - unlink(vm); - // We should only get here once the owning CodeBlock is dying, since the executable must - // already be in the owner's weak references. - m_lastSeenCalleeOrExecutable.clear(); + unlinkOrUpgrade(vm, nullptr, nullptr); } + break; } - if (!isDirect() && haveLastSeenCallee() && !vm.heap.isMarked(lastSeenCallee())) { + } + + if (haveLastSeenCallee() && !vm.heap.isMarked(lastSeenCallee())) { if (lastSeenCallee()->type() == JSFunctionType) handleSpecificCallee(jsCast(lastSeenCallee())); else m_clearedByGC = true; - clearLastSeenCallee(); + m_lastSeenCallee.clear(); } } -void CallLinkInfo::setSlowPathCallDestination(CodePtr codePtr) -{ - m_slowPathCallDestination = codePtr; -} - void CallLinkInfo::revertCallToStub() { RELEASE_ASSERT(stub()); @@ -296,121 +242,187 @@ void CallLinkInfo::revertCallToStub() // need something cleaner. But this works on arm64 for now. 
if (isDataIC()) { - m_calleeOrCodeBlock.clear(); + m_callee.clear(); u.dataIC.m_codeBlock = nullptr; u.dataIC.m_monomorphicCallDestination = nullptr; } else { #if ENABLE(JIT) MacroAssembler::repatchPointer(u.codeIC.m_codeBlockLocation, nullptr); - CCallHelpers::revertJumpReplacementToBranchPtrWithPatch( - CCallHelpers::startOfBranchPtrWithPatchOnRegister(u.codeIC.m_calleeLocation), calleeGPR(), nullptr); + CCallHelpers::revertJumpReplacementToBranchPtrWithPatch(CCallHelpers::startOfBranchPtrWithPatchOnRegister(u.codeIC.m_calleeLocation), BaselineJITRegisters::Call::calleeGPR, nullptr); #else RELEASE_ASSERT_NOT_REACHED(); #endif } } -void BaselineCallLinkInfo::initialize(VM& vm, CallType callType, BytecodeIndex bytecodeIndex) +void BaselineCallLinkInfo::initialize(VM& vm, CodeBlock* owner, CallType callType, BytecodeIndex bytecodeIndex) { + m_owner = owner; m_type = static_cast(Type::Baseline); ASSERT(Type::Baseline == type()); m_useDataIC = static_cast(UseDataIC::Yes); ASSERT(UseDataIC::Yes == useDataIC()); m_bytecodeIndex = bytecodeIndex; m_callType = callType; - if (LIKELY(Options::useLLIntICs())) - setSlowPathCallDestination(vm.getCTILinkCall().code()); - else - setSlowPathCallDestination(vm.getCTIVirtualCall(callMode()).retagged().code()); + m_mode = static_cast(Mode::Init); // If JIT is disabled, we should not support dynamically generated call IC. if (!Options::useJIT()) disallowStubs(); + if (UNLIKELY(!Options::useLLIntICs())) + setVirtualCall(vm); } +std::tuple CallLinkInfo::retrieveCaller(JSCell* owner) +{ + auto* codeBlock = jsDynamicCast(owner); + if (!codeBlock) + return { }; + CodeOrigin codeOrigin = this->codeOrigin(); + if (auto* baselineCodeBlock = codeOrigin.codeOriginOwner()) + return std::tuple { baselineCodeBlock, codeOrigin.bytecodeIndex() }; + return std::tuple { codeBlock, codeOrigin.bytecodeIndex() }; +} + +void CallLinkInfo::reset(VM&) +{ #if ENABLE(JIT) + if (type() == CallLinkInfo::Type::Optimizing) + static_cast(this)->setSlowPathCallDestination(LLInt::defaultCall().code()); +#endif + if (stub()) + revertCallToStub(); + clearCallee(); // This also clears the inline cache both for data and code-based caches. + clearSeen(); + clearStub(); + if (isOnList()) + remove(); + m_mode = static_cast(Mode::Init); +} + +void CallLinkInfo::revertCall(VM& vm) +{ + if (UNLIKELY(!Options::useLLIntICs() && type() == CallLinkInfo::Type::Baseline)) + setVirtualCall(vm); + else + reset(vm); +} -void OptimizingCallLinkInfo::setFrameShuffleData(const CallFrameShuffleData& shuffleData) +void CallLinkInfo::setVirtualCall(VM& vm) { - m_frameShuffleData = makeUnique(shuffleData); - m_frameShuffleData->shrinkToFit(); + reset(vm); +#if ENABLE(JIT) + if (type() == Type::Optimizing) + static_cast(this)->setSlowPathCallDestination(vm.getCTIVirtualCall(callMode()).retagged().code()); +#endif + if (isDataIC()) { + m_callee.clear(); + *bitwise_cast(m_callee.slot()) = polymorphicCalleeMask; + u.dataIC.m_codeBlock = nullptr; // PolymorphicCallStubRoutine will set CodeBlock inside it. 
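
// Aside — the polymorphicCalleeMask trick used just above, in isolation:
// object pointers are at least 2-byte aligned, so a real callee can never have
// its low bit set. Writing the mask into the callee slot makes the fast path's
// "is this the cached callee?" pointer compare miss for every real callee,
// while a single branchTestPtr(NonZero, slot, mask) detects "take the
// polymorphic/virtual path". Values below are illustrative.

#include <cassert>
#include <cstdint>

constexpr uintptr_t polymorphicMask = 1;

inline bool isPolymorphicSentinel(uintptr_t slot) { return slot & polymorphicMask; }

int main()
{
    alignas(2) static int callee; // any real callee address has bit 0 clear
    uintptr_t slot = reinterpret_cast<uintptr_t>(&callee);
    assert(!isPolymorphicSentinel(slot));

    slot = polymorphicMask; // "no single cached callee; go virtual"
    assert(isPolymorphicSentinel(slot));
    assert(slot != reinterpret_cast<uintptr_t>(&callee)); // monomorphic compare misses
    return 0;
}
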
+ u.dataIC.m_monomorphicCallDestination = vm.getCTIVirtualCall(callMode()).code().template retagged(); + } + setClearedByVirtual(); + m_mode = static_cast(Mode::Virtual); } -MacroAssembler::JumpList CallLinkInfo::emitFastPathImpl(CallLinkInfo* callLinkInfo, CCallHelpers& jit, GPRReg calleeGPR, GPRReg callLinkInfoGPR, UseDataIC useDataIC, bool isTailCall, ScopedLambda&& prepareForTailCall) +JSGlobalObject* CallLinkInfo::globalObjectForSlowPath(JSCell* owner) +{ + auto [codeBlock, bytecodeIndex] = retrieveCaller(owner); + if (codeBlock) + return codeBlock->globalObject(); +#if ENABLE(WEBASSEMBLY) + auto* module = jsDynamicCast(owner); + if (module) + return module->globalObject(); +#endif + RELEASE_ASSERT_NOT_REACHED(); + return nullptr; +} + +#if ENABLE(JIT) + +void OptimizingCallLinkInfo::setSlowPathCallDestination(CodePtr codePtr) +{ + m_slowPathCallDestination = codePtr; +} + +std::tuple CallLinkInfo::emitFastPathImpl(CallLinkInfo* callLinkInfo, CCallHelpers& jit, UseDataIC useDataIC, bool isTailCall, ScopedLambda&& prepareForTailCall) { CCallHelpers::JumpList slowPath; if (useDataIC == UseDataIC::Yes) { - CCallHelpers::Jump goPolymorphic; +#if USE(JSVALUE32_64) + // We need this on JSVALUE32_64 only as on JSVALUE64 a pointer comparison in the DataIC fast + // path catches this. + auto failed = jit.branchIfNotCell(BaselineJITRegisters::Call::calleeJSR); +#endif // For RISCV64, scratch register usage here collides with MacroAssembler's internal usage // that's necessary for the test-and-branch operation but is avoidable by loading from the callee // address for each branch operation. Other MacroAssembler implementations handle this better by // using a wider range of scratch registers or more potent branching instructions. + CCallHelpers::JumpList found; + jit.loadPtr(CCallHelpers::Address(BaselineJITRegisters::Call::callLinkInfoGPR, offsetOfMonomorphicCallDestination()), BaselineJITRegisters::Call::callTargetGPR); if constexpr (isRISCV64()) { - CCallHelpers::Address calleeAddress(callLinkInfoGPR, offsetOfCallee()); - goPolymorphic = jit.branchTestPtr(CCallHelpers::NonZero, calleeAddress, CCallHelpers::TrustedImm32(polymorphicCalleeMask)); - slowPath.append(jit.branchPtr(CCallHelpers::NotEqual, calleeAddress, calleeGPR)); + CCallHelpers::Address calleeAddress(BaselineJITRegisters::Call::callLinkInfoGPR, offsetOfCallee()); + found.append(jit.branchPtr(CCallHelpers::Equal, calleeAddress, BaselineJITRegisters::Call::calleeGPR)); + found.append(jit.branchTestPtr(CCallHelpers::NonZero, calleeAddress, CCallHelpers::TrustedImm32(polymorphicCalleeMask))); } else { GPRReg scratchGPR = jit.scratchRegister(); DisallowMacroScratchRegisterUsage disallowScratch(jit); - jit.loadPtr(CCallHelpers::Address(callLinkInfoGPR, offsetOfCallee()), scratchGPR); - goPolymorphic = jit.branchTestPtr(CCallHelpers::NonZero, scratchGPR, CCallHelpers::TrustedImm32(polymorphicCalleeMask)); - slowPath.append(jit.branchPtr(CCallHelpers::NotEqual, scratchGPR, calleeGPR)); + jit.loadPtr(CCallHelpers::Address(BaselineJITRegisters::Call::callLinkInfoGPR, offsetOfCallee()), scratchGPR); + found.append(jit.branchPtr(CCallHelpers::Equal, scratchGPR, BaselineJITRegisters::Call::calleeGPR)); + found.append(jit.branchTestPtr(CCallHelpers::NonZero, scratchGPR, CCallHelpers::TrustedImm32(polymorphicCalleeMask))); } - if (isTailCall) { - prepareForTailCall(); - - GPRReg scratchGPR = CCallHelpers::selectScratchGPR(calleeGPR, callLinkInfoGPR); - jit.loadPtr(CCallHelpers::Address(callLinkInfoGPR, offsetOfCodeBlock()), scratchGPR); - 
jit.storePtr(scratchGPR, CCallHelpers::calleeFrameCodeBlockBeforeTailCall()); - - goPolymorphic.link(&jit); // Polymorphic stub handles tail call stack prep. - jit.farJump(CCallHelpers::Address(callLinkInfoGPR, offsetOfMonomorphicCallDestination()), JSEntryPtrTag); - } else { - GPRReg scratchGPR = CCallHelpers::selectScratchGPR(calleeGPR, callLinkInfoGPR); - jit.loadPtr(CCallHelpers::Address(callLinkInfoGPR, offsetOfCodeBlock()), scratchGPR); - jit.storePtr(scratchGPR, CCallHelpers::calleeFrameCodeBlockBeforeCall()); - - goPolymorphic.link(&jit); - jit.call(CCallHelpers::Address(callLinkInfoGPR, offsetOfMonomorphicCallDestination()), JSEntryPtrTag); - } - } else { - CCallHelpers::DataLabelPtr calleeCheck; - slowPath.append(jit.branchPtrWithPatch(CCallHelpers::NotEqual, calleeGPR, calleeCheck, CCallHelpers::TrustedImmPtr(nullptr))); +#if USE(JSVALUE32_64) + failed.link(&jit); +#endif + jit.move(CCallHelpers::TrustedImmPtr(LLInt::defaultCall().code().taggedPtr()), BaselineJITRegisters::Call::callTargetGPR); - CCallHelpers::Call call; - CCallHelpers::DataLabelPtr codeBlockStore; + auto dispatch = jit.label(); + found.link(&jit); if (isTailCall) { prepareForTailCall(); - codeBlockStore = jit.storePtrWithPatch(CCallHelpers::TrustedImmPtr(nullptr), CCallHelpers::calleeFrameCodeBlockBeforeTailCall()); - call = jit.nearTailCall(); + jit.transferPtr(CCallHelpers::Address(BaselineJITRegisters::Call::callLinkInfoGPR, offsetOfCodeBlock()), CCallHelpers::calleeFrameCodeBlockBeforeTailCall()); + jit.farJump(BaselineJITRegisters::Call::callTargetGPR, JSEntryPtrTag); } else { - codeBlockStore = jit.storePtrWithPatch(CCallHelpers::TrustedImmPtr(nullptr), CCallHelpers::calleeFrameCodeBlockBeforeCall()); - call = jit.nearCall(); + jit.transferPtr(CCallHelpers::Address(BaselineJITRegisters::Call::callLinkInfoGPR, offsetOfCodeBlock()), CCallHelpers::calleeFrameCodeBlockBeforeCall()); + jit.call(BaselineJITRegisters::Call::callTargetGPR, JSEntryPtrTag); } + return std::tuple { slowPath, dispatch }; + } - RELEASE_ASSERT(callLinkInfo); - jit.addLinkTask([=] (LinkBuffer& linkBuffer) { - static_cast(callLinkInfo)->m_callLocation = linkBuffer.locationOfNearCall(call); - callLinkInfo->u.codeIC.m_codeBlockLocation = linkBuffer.locationOf(codeBlockStore); - callLinkInfo->u.codeIC.m_calleeLocation = linkBuffer.locationOf(calleeCheck); - }); + CCallHelpers::DataLabelPtr calleeCheck; + CCallHelpers::Call call; + CCallHelpers::DataLabelPtr codeBlockStore; + if (isTailCall) { + prepareForTailCall(); + slowPath.append(jit.branchPtrWithPatch(CCallHelpers::NotEqual, GPRInfo::regT0, calleeCheck, CCallHelpers::TrustedImmPtr(nullptr))); + codeBlockStore = jit.storePtrWithPatch(CCallHelpers::TrustedImmPtr(nullptr), CCallHelpers::calleeFrameCodeBlockBeforeTailCall()); + call = jit.nearTailCall(); + } else { + slowPath.append(jit.branchPtrWithPatch(CCallHelpers::NotEqual, GPRInfo::regT0, calleeCheck, CCallHelpers::TrustedImmPtr(nullptr))); + codeBlockStore = jit.storePtrWithPatch(CCallHelpers::TrustedImmPtr(nullptr), CCallHelpers::calleeFrameCodeBlockBeforeCall()); + call = jit.nearCall(); } - return slowPath; + RELEASE_ASSERT(callLinkInfo); + jit.addLinkTask([=] (LinkBuffer& linkBuffer) { + static_cast(callLinkInfo)->m_callLocation = linkBuffer.locationOfNearCall(call); + callLinkInfo->u.codeIC.m_codeBlockLocation = linkBuffer.locationOf(codeBlockStore); + callLinkInfo->u.codeIC.m_calleeLocation = linkBuffer.locationOf(calleeCheck); + }); + return std::tuple { slowPath, CCallHelpers::Label() }; } -MacroAssembler::JumpList 
CallLinkInfo::emitDataICFastPath(CCallHelpers& jit, GPRReg calleeGPR, GPRReg callLinkInfoGPR) +std::tuple CallLinkInfo::emitDataICFastPath(CCallHelpers& jit) { - RELEASE_ASSERT(callLinkInfoGPR != InvalidGPRReg); - return emitFastPathImpl(nullptr, jit, calleeGPR, callLinkInfoGPR, UseDataIC::Yes, false, nullptr); + return emitFastPathImpl(nullptr, jit, UseDataIC::Yes, false, nullptr); } -MacroAssembler::JumpList CallLinkInfo::emitTailCallDataICFastPath(CCallHelpers& jit, GPRReg calleeGPR, GPRReg callLinkInfoGPR, ScopedLambda&& prepareForTailCall) +std::tuple CallLinkInfo::emitTailCallDataICFastPath(CCallHelpers& jit, ScopedLambda&& prepareForTailCall) { - RELEASE_ASSERT(callLinkInfoGPR != InvalidGPRReg); - return emitFastPathImpl(nullptr, jit, calleeGPR, callLinkInfoGPR, UseDataIC::Yes, true, WTFMove(prepareForTailCall)); + return emitFastPathImpl(nullptr, jit, UseDataIC::Yes, true, WTFMove(prepareForTailCall)); } void CallLinkInfo::setStub(Ref&& newStub) @@ -418,10 +430,10 @@ void CallLinkInfo::setStub(Ref&& newStub) clearStub(); m_stub = WTFMove(newStub); - m_calleeOrCodeBlock.clear(); + m_callee.clear(); if (isDataIC()) { - *bitwise_cast(m_calleeOrCodeBlock.slot()) = polymorphicCalleeMask; + *bitwise_cast(m_callee.slot()) = polymorphicCalleeMask; u.dataIC.m_codeBlock = nullptr; // PolymorphicCallStubRoutine will set CodeBlock inside it. u.dataIC.m_monomorphicCallDestination = m_stub->code().code().retagged(); } else { @@ -430,111 +442,197 @@ void CallLinkInfo::setStub(Ref&& newStub) MacroAssembler::startOfBranchPtrWithPatchOnRegister(u.codeIC.m_calleeLocation), CodeLocationLabel(m_stub->code().code())); } + m_mode = static_cast(Mode::Polymorphic); } -void CallLinkInfo::emitDataICSlowPath(VM&, CCallHelpers& jit, GPRReg callLinkInfoGPR) +void CallLinkInfo::emitSlowPathImpl(VM&, CCallHelpers& jit, UseDataIC useDataIC, bool isTailCall, MacroAssembler::Label dispatchLabel) { - jit.move(callLinkInfoGPR, GPRInfo::regT2); - jit.call(CCallHelpers::Address(GPRInfo::regT2, offsetOfSlowPathCallDestination()), JSEntryPtrTag); + if (useDataIC == UseDataIC::Yes) { +#if USE(JSVALUE32_64) + if (isTailCall) { + jit.move(CCallHelpers::TrustedImmPtr(LLInt::defaultCall().code().taggedPtr()), BaselineJITRegisters::Call::callTargetGPR); + jit.jump().linkTo(dispatchLabel, &jit); + return; + } + jit.nearCallThunk(CodeLocationLabel { LLInt::defaultCall().code() }.retagged()); +#else + UNUSED_PARAM(dispatchLabel); +#endif + return; + } + + if (isTailCall) + jit.farJump(CCallHelpers::Address(BaselineJITRegisters::Call::callLinkInfoGPR, OptimizingCallLinkInfo::offsetOfSlowPathCallDestination()), JSEntryPtrTag); + else + jit.call(CCallHelpers::Address(BaselineJITRegisters::Call::callLinkInfoGPR, OptimizingCallLinkInfo::offsetOfSlowPathCallDestination()), JSEntryPtrTag); } -MacroAssembler::JumpList CallLinkInfo::emitFastPath(CCallHelpers& jit, CompileTimeCallLinkInfo callLinkInfo, GPRReg calleeGPR, GPRReg callLinkInfoGPR) +void CallLinkInfo::emitDataICSlowPath(VM& vm, CCallHelpers& jit, bool isTailCall, MacroAssembler::Label dispatchLabel) +{ + emitSlowPathImpl(vm, jit, UseDataIC::Yes, isTailCall, dispatchLabel); +} + +std::tuple CallLinkInfo::emitFastPath(CCallHelpers& jit, CompileTimeCallLinkInfo callLinkInfo) { if (std::holds_alternative(callLinkInfo)) - return std::get(callLinkInfo)->emitFastPath(jit, calleeGPR, callLinkInfoGPR); + return std::get(callLinkInfo)->emitFastPath(jit); - return CallLinkInfo::emitDataICFastPath(jit, calleeGPR, callLinkInfoGPR); + return CallLinkInfo::emitDataICFastPath(jit); } 
-MacroAssembler::JumpList CallLinkInfo::emitTailCallFastPath(CCallHelpers& jit, CompileTimeCallLinkInfo callLinkInfo, GPRReg calleeGPR, GPRReg callLinkInfoGPR, ScopedLambda&& prepareForTailCall) +std::tuple CallLinkInfo::emitTailCallFastPath(CCallHelpers& jit, CompileTimeCallLinkInfo callLinkInfo, ScopedLambda&& prepareForTailCall) { if (std::holds_alternative(callLinkInfo)) - return std::get(callLinkInfo)->emitTailCallFastPath(jit, calleeGPR, callLinkInfoGPR, WTFMove(prepareForTailCall)); + return std::get(callLinkInfo)->emitTailCallFastPath(jit, WTFMove(prepareForTailCall)); - return CallLinkInfo::emitTailCallDataICFastPath(jit, calleeGPR, callLinkInfoGPR, WTFMove(prepareForTailCall)); + return CallLinkInfo::emitTailCallDataICFastPath(jit, WTFMove(prepareForTailCall)); } -void CallLinkInfo::emitSlowPath(VM& vm, CCallHelpers& jit, CompileTimeCallLinkInfo callLinkInfo, GPRReg callLinkInfoGPR) +void CallLinkInfo::emitSlowPath(VM& vm, CCallHelpers& jit, CompileTimeCallLinkInfo callLinkInfo) { if (std::holds_alternative(callLinkInfo)) { std::get(callLinkInfo)->emitSlowPath(vm, jit); return; } - emitDataICSlowPath(vm, jit, callLinkInfoGPR); + emitDataICSlowPath(vm, jit, /* isTailCall */ false, { }); } -CCallHelpers::JumpList OptimizingCallLinkInfo::emitFastPath(CCallHelpers& jit, GPRReg calleeGPR, GPRReg callLinkInfoGPR) +void CallLinkInfo::emitTailCallSlowPath(VM& vm, CCallHelpers& jit, CompileTimeCallLinkInfo callLinkInfo, MacroAssembler::Label dispatchLabel) { - RELEASE_ASSERT(!isTailCall()); - - if (isDataIC()) { - RELEASE_ASSERT(callLinkInfoGPR != GPRReg::InvalidGPRReg); - jit.move(CCallHelpers::TrustedImmPtr(this), callLinkInfoGPR); - setCallLinkInfoGPR(callLinkInfoGPR); + if (std::holds_alternative(callLinkInfo)) { + std::get(callLinkInfo)->emitTailCallSlowPath(vm, jit, dispatchLabel); + return; } + emitDataICSlowPath(vm, jit, /* isTailCall */ true, dispatchLabel); +} + +std::tuple OptimizingCallLinkInfo::emitFastPath(CCallHelpers& jit) +{ + RELEASE_ASSERT(!isTailCall()); - return emitFastPathImpl(this, jit, calleeGPR, callLinkInfoGPR, useDataIC(), isTailCall(), nullptr); + if (isDataIC()) + jit.move(CCallHelpers::TrustedImmPtr(this), BaselineJITRegisters::Call::callLinkInfoGPR); + return emitFastPathImpl(this, jit, useDataIC(), isTailCall(), nullptr); } -MacroAssembler::JumpList OptimizingCallLinkInfo::emitTailCallFastPath(CCallHelpers& jit, GPRReg calleeGPR, GPRReg callLinkInfoGPR, ScopedLambda&& prepareForTailCall) +std::tuple OptimizingCallLinkInfo::emitTailCallFastPath(CCallHelpers& jit, ScopedLambda&& prepareForTailCall) { RELEASE_ASSERT(isTailCall()); - if (isDataIC()) { - RELEASE_ASSERT(callLinkInfoGPR != GPRReg::InvalidGPRReg); - jit.move(CCallHelpers::TrustedImmPtr(this), callLinkInfoGPR); - setCallLinkInfoGPR(callLinkInfoGPR); - } - - return emitFastPathImpl(this, jit, calleeGPR, callLinkInfoGPR, useDataIC(), isTailCall(), WTFMove(prepareForTailCall)); + if (isDataIC()) + jit.move(CCallHelpers::TrustedImmPtr(this), BaselineJITRegisters::Call::callLinkInfoGPR); + return emitFastPathImpl(this, jit, useDataIC(), isTailCall(), WTFMove(prepareForTailCall)); } void OptimizingCallLinkInfo::emitSlowPath(VM& vm, CCallHelpers& jit) { - setSlowPathCallDestination(vm.getCTILinkCall().code()); - jit.move(CCallHelpers::TrustedImmPtr(this), GPRInfo::regT2); - jit.call(CCallHelpers::Address(GPRInfo::regT2, offsetOfSlowPathCallDestination()), JSEntryPtrTag); + setSlowPathCallDestination(LLInt::defaultCall().code()); + RELEASE_ASSERT(!isTailCall()); + 
jit.move(CCallHelpers::TrustedImmPtr(this), BaselineJITRegisters::Call::callLinkInfoGPR); + return emitSlowPathImpl(vm, jit, useDataIC(), isTailCall(), { }); } -CodeLocationLabel OptimizingCallLinkInfo::slowPathStart() +void OptimizingCallLinkInfo::emitTailCallSlowPath(VM& vm, CCallHelpers& jit, MacroAssembler::Label dispatchLabel) { - RELEASE_ASSERT(isDirect() && !isDataIC()); - return m_slowPathStart; + setSlowPathCallDestination(LLInt::defaultCall().code()); + RELEASE_ASSERT(isTailCall()); + jit.move(CCallHelpers::TrustedImmPtr(this), BaselineJITRegisters::Call::callLinkInfoGPR); + return emitSlowPathImpl(vm, jit, useDataIC(), isTailCall(), dispatchLabel); } -CodeLocationLabel OptimizingCallLinkInfo::fastPathStart() +#if ENABLE(DFG_JIT) +void OptimizingCallLinkInfo::initializeFromDFGUnlinkedCallLinkInfo(VM&, const DFG::UnlinkedCallLinkInfo& unlinkedCallLinkInfo, CodeBlock* owner) { - RELEASE_ASSERT(isDirect() && isTailCall()); - return CodeLocationDataLabelPtr(m_fastPathStart); + m_owner = owner; + m_doneLocation = unlinkedCallLinkInfo.doneLocation; + setSlowPathCallDestination(LLInt::defaultCall().code()); + m_codeOrigin = unlinkedCallLinkInfo.codeOrigin; + m_callType = unlinkedCallLinkInfo.callType; } +#endif -void OptimizingCallLinkInfo::emitDirectFastPath(CCallHelpers& jit) +void DirectCallLinkInfo::reset() { - RELEASE_ASSERT(isDirect() && !isTailCall()); + if (isOnList()) + remove(); +#if ENABLE(JIT) + if (!isDataIC()) + initialize(); +#endif + m_target = { }; + m_codeBlock = nullptr; +} + +void DirectCallLinkInfo::unlinkOrUpgradeImpl(VM&, CodeBlock* oldCodeBlock, CodeBlock* newCodeBlock) +{ + if (isOnList()) + remove(); + + if (!!m_target) { + if (m_codeBlock && newCodeBlock && oldCodeBlock == m_codeBlock) { + ArityCheckMode arityCheck = oldCodeBlock->jitCode()->addressForCall(ArityCheckNotRequired) == m_target ? ArityCheckNotRequired : MustCheckArity; + auto target = newCodeBlock->jitCode()->addressForCall(arityCheck); + setCallTarget(newCodeBlock, CodeLocationLabel { target }); + newCodeBlock->linkIncomingCall(nullptr, this); // This is just relinking. So owner and caller frame can be nullptr. + return; + } + dataLogLnIf(Options::dumpDisassembly(), "Unlinking CallLinkInfo: ", RawPointer(this)); + reset(); + } + + // Either we were unlinked, in which case we should not have been on any list, or we unlinked + // ourselves so that we're not on any list anymore.
+ RELEASE_ASSERT(!isOnList()); +} + +void DirectCallLinkInfo::visitWeak(VM& vm) +{ + if (m_codeBlock && !vm.heap.isMarked(m_codeBlock)) { + dataLogLnIf(Options::verboseOSR(), "Clearing call to ", RawPointer(m_codeBlock), " (", pointerDump(m_codeBlock), ")."); + unlinkOrUpgrade(vm, nullptr, nullptr); + } +} + +CCallHelpers::JumpList DirectCallLinkInfo::emitDirectFastPath(CCallHelpers& jit) +{ + RELEASE_ASSERT(!isTailCall()); - ASSERT(UseDataIC::No == this->useDataIC()); + if (isDataIC()) { + CCallHelpers::JumpList slowPath; + jit.move(CCallHelpers::TrustedImmPtr(this), BaselineJITRegisters::Call::callLinkInfoGPR); + slowPath.append(jit.branchTestPtr(CCallHelpers::Zero, CCallHelpers::Address(BaselineJITRegisters::Call::callLinkInfoGPR, offsetOfTarget()))); + jit.transferPtr(CCallHelpers::Address(BaselineJITRegisters::Call::callLinkInfoGPR, offsetOfCodeBlock()), CCallHelpers::calleeFrameCodeBlockBeforeCall()); + jit.call(CCallHelpers::Address(BaselineJITRegisters::Call::callLinkInfoGPR, offsetOfTarget()), JSEntryPtrTag); + return slowPath; + } auto codeBlockStore = jit.storePtrWithPatch(CCallHelpers::TrustedImmPtr(nullptr), CCallHelpers::calleeFrameCodeBlockBeforeCall()); auto call = jit.nearCall(); jit.addLinkTask([=, this] (LinkBuffer& linkBuffer) { m_callLocation = linkBuffer.locationOfNearCall(call); - u.codeIC.m_codeBlockLocation = linkBuffer.locationOf(codeBlockStore); + m_codeBlockLocation = linkBuffer.locationOf(codeBlockStore); }); - jit.addLateLinkTask([this] (LinkBuffer&) { - initializeDirectCall(); + jit.addLateLinkTask([this](LinkBuffer&) { + repatchSpeculatively(); }); + return { }; } -void OptimizingCallLinkInfo::emitDirectTailCallFastPath(CCallHelpers& jit, ScopedLambda&& prepareForTailCall) +CCallHelpers::JumpList DirectCallLinkInfo::emitDirectTailCallFastPath(CCallHelpers& jit, ScopedLambda&& prepareForTailCall) { - RELEASE_ASSERT(isDirect() && isTailCall()); + RELEASE_ASSERT(isTailCall()); - ASSERT(UseDataIC::No == this->useDataIC()); + if (isDataIC()) { + CCallHelpers::JumpList slowPath; + jit.move(CCallHelpers::TrustedImmPtr(this), BaselineJITRegisters::Call::callLinkInfoGPR); + slowPath.append(jit.branchTestPtr(CCallHelpers::Zero, CCallHelpers::Address(BaselineJITRegisters::Call::callLinkInfoGPR, offsetOfTarget()))); + prepareForTailCall(); + jit.transferPtr(CCallHelpers::Address(BaselineJITRegisters::Call::callLinkInfoGPR, offsetOfCodeBlock()), CCallHelpers::calleeFrameCodeBlockBeforeTailCall()); + jit.farJump(CCallHelpers::Address(BaselineJITRegisters::Call::callLinkInfoGPR, offsetOfTarget()), JSEntryPtrTag); + return slowPath; + } auto fastPathStart = jit.label(); - jit.addLinkTask([=, this] (LinkBuffer& linkBuffer) { - m_fastPathStart = linkBuffer.locationOf(fastPathStart); - }); // - If we're not yet linked, this is a jump to the slow path. // - Once we're linked to a fast path, this goes back to being nops so we fall through to the linked jump. 
@@ -544,68 +642,128 @@ void OptimizingCallLinkInfo::emitDirectTailCallFastPath(CCallHelpers& jit, Scope auto codeBlockStore = jit.storePtrWithPatch(CCallHelpers::TrustedImmPtr(nullptr), CCallHelpers::calleeFrameCodeBlockBeforeTailCall()); auto call = jit.nearTailCall(); jit.addLinkTask([=, this] (LinkBuffer& linkBuffer) { + m_fastPathStart = linkBuffer.locationOf(fastPathStart); m_callLocation = linkBuffer.locationOfNearCall(call); - u.codeIC.m_codeBlockLocation = linkBuffer.locationOf(codeBlockStore); + m_codeBlockLocation = linkBuffer.locationOf(codeBlockStore); }); - jit.addLateLinkTask([this] (LinkBuffer&) { - initializeDirectCall(); + jit.addLateLinkTask([this](LinkBuffer&) { + repatchSpeculatively(); }); + return { }; } -void OptimizingCallLinkInfo::initializeDirectCall() +void DirectCallLinkInfo::initialize() { - RELEASE_ASSERT(isDirect()); ASSERT(m_callLocation); - ASSERT(u.codeIC.m_codeBlockLocation); + ASSERT(m_codeBlockLocation); if (isTailCall()) { RELEASE_ASSERT(fastPathStart()); - CCallHelpers::emitJITCodeOver(fastPathStart(), scopedLambda([&](CCallHelpers& jit) { - auto jump = jit.jump(); - jit.addLinkTask([=, this] (LinkBuffer& linkBuffer) { - linkBuffer.link(jump, slowPathStart()); - }); - }), "initialize direct call"); + CCallHelpers::replaceWithJump(fastPathStart(), slowPathStart()); } else MacroAssembler::repatchNearCall(m_callLocation, slowPathStart()); } -void OptimizingCallLinkInfo::setDirectCallTarget(CodeBlock* codeBlock, CodeLocationLabel target) +void DirectCallLinkInfo::setCallTarget(CodeBlock* codeBlock, CodeLocationLabel target) { - RELEASE_ASSERT(isDirect()); + m_codeBlock = codeBlock; + m_target = target; - if (isTailCall()) { - RELEASE_ASSERT(fastPathStart()); - CCallHelpers::emitJITCodeOver(fastPathStart(), scopedLambda([&](CCallHelpers& jit) { + if (!isDataIC()) { + if (isTailCall()) { + RELEASE_ASSERT(fastPathStart()); // We reserved this many bytes for the jump at fastPathStart(). Make that // code nops now so we fall through to the jump to the fast path. 
- jit.emitNops(CCallHelpers::patchableJumpSize()); - }), "Setting direct call target"); - } + CCallHelpers::replaceWithNops(fastPathStart(), CCallHelpers::patchableJumpSize()); + } - MacroAssembler::repatchNearCall(m_callLocation, target); - MacroAssembler::repatchPointer(u.codeIC.m_codeBlockLocation, codeBlock); + MacroAssembler::repatchNearCall(m_callLocation, target); + MacroAssembler::repatchPointer(m_codeBlockLocation, codeBlock); + } } -void OptimizingCallLinkInfo::setDirectCallMaxArgumentCountIncludingThis(unsigned value) +void DirectCallLinkInfo::setMaxArgumentCountIncludingThis(unsigned value) { - RELEASE_ASSERT(isDirect()); RELEASE_ASSERT(value); - m_maxArgumentCountIncludingThisForDirectCall = value; + m_maxArgumentCountIncludingThis = value; } -#if ENABLE(DFG_JIT) -void OptimizingCallLinkInfo::initializeFromDFGUnlinkedCallLinkInfo(VM& vm, const DFG::UnlinkedCallLinkInfo& unlinkedCallLinkInfo) +std::tuple> DirectCallLinkInfo::retrieveCallInfo(FunctionExecutable* functionExecutable) { - m_doneLocation = unlinkedCallLinkInfo.doneLocation; - setSlowPathCallDestination(vm.getCTILinkCall().code()); - m_codeOrigin = unlinkedCallLinkInfo.codeOrigin; - m_callType = unlinkedCallLinkInfo.callType; - m_calleeGPR = unlinkedCallLinkInfo.calleeGPR; - m_callLinkInfoGPR = unlinkedCallLinkInfo.callLinkInfoGPR; - if (unlinkedCallLinkInfo.m_frameShuffleData) - m_frameShuffleData = makeUnique(*unlinkedCallLinkInfo.m_frameShuffleData); + CodeSpecializationKind kind = specializationKind(); + CodeBlock* codeBlock = functionExecutable->codeBlockFor(kind); + if (!codeBlock) + return { }; + + CodeBlock* ownerCodeBlock = jsDynamicCast(owner()); + if (!ownerCodeBlock) + return { }; + + if (ownerCodeBlock->alternative() == codeBlock) + return { }; + + unsigned argumentStackSlots = maxArgumentCountIncludingThis(); + ArityCheckMode arityCheckMode = (argumentStackSlots < static_cast(codeBlock->numParameters())) ? MustCheckArity : ArityCheckNotRequired; + CodePtr codePtr = codeBlock->addressForCallConcurrently(arityCheckMode); + if (!codePtr) + return { }; + + return std::tuple { codeBlock, codePtr }; +} + +void DirectCallLinkInfo::repatchSpeculatively() +{ + if (m_executable->isHostFunction()) { + CodeSpecializationKind kind = specializationKind(); + CodePtr codePtr; + if (kind == CodeForCall) + codePtr = m_executable->generatedJITCodeWithArityCheckForCall(); + else + codePtr = m_executable->generatedJITCodeWithArityCheckForConstruct(); + if (codePtr) + setCallTarget(nullptr, CodeLocationLabel { codePtr }); + else + initialize(); + return; + } + + FunctionExecutable* functionExecutable = jsDynamicCast(m_executable); + if (!functionExecutable) { + initialize(); + return; + } + + auto [codeBlock, codePtr] = retrieveCallInfo(functionExecutable); + if (codeBlock && codePtr) { + m_codeBlock = codeBlock; + m_target = codePtr; + // Do not chain |this| to the callee codeBlock concurrently. It will be done in the main thread if the speculatively repatched one is still valid.
+ setCallTarget(codeBlock, CodeLocationLabel { codePtr }); + return; + } + + initialize(); +} + +void DirectCallLinkInfo::validateSpeculativeRepatchOnMainThread(VM&) +{ + constexpr bool verbose = false; + FunctionExecutable* functionExecutable = jsDynamicCast(m_executable); + if (!functionExecutable) + return; + + auto [codeBlock, codePtr] = retrieveCallInfo(functionExecutable); + if (m_codeBlock != codeBlock || m_target != codePtr) { + dataLogLnIf(verbose, "Speculative repatching failed ", RawPointer(m_codeBlock), " ", m_target, " => ", RawPointer(codeBlock), " ", codePtr); + if (codeBlock && codePtr) + setCallTarget(codeBlock, CodeLocationLabel { codePtr }); + else + reset(); + } else + dataLogLnIf(verbose, "Speculative repatching succeeded ", RawPointer(m_codeBlock), " ", m_target); + + if (m_codeBlock) + m_codeBlock->linkIncomingCall(owner(), this); } -#endif #endif diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkInfo.h b/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkInfo.h index 760f698b..3277f05a 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkInfo.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkInfo.h @@ -26,7 +26,9 @@ #pragma once #include "BaselineJITRegisters.h" +#include "CallFrame.h" #include "CallFrameShuffleData.h" +#include "CallLinkInfoBase.h" #include "CallMode.h" #include "CodeLocation.h" #include "CodeOrigin.h" @@ -53,9 +55,10 @@ struct CallFrameShuffleData; struct UnlinkedCallLinkInfo; struct BaselineUnlinkedCallLinkInfo; + using CompileTimeCallLinkInfo = std::variant; -class CallLinkInfo : public PackedRawSentinelNode { +class CallLinkInfo : public CallLinkInfoBase { public: friend class LLIntOffsetsExtractor; @@ -66,17 +69,11 @@ class CallLinkInfo : public PackedRawSentinelNode { Optimizing, }; - enum CallType : uint8_t { - None, - Call, - CallVarargs, - Construct, - ConstructVarargs, - TailCall, - TailCallVarargs, - DirectCall, - DirectConstruct, - DirectTailCall + enum class Mode : uint8_t { + Init, + Monomorphic, + Polymorphic, + Virtual, }; static constexpr uintptr_t polymorphicCalleeMask = 1; @@ -106,62 +103,12 @@ class CallLinkInfo : public PackedRawSentinelNode { { return specializationKindFor(static_cast(m_callType)); } - - static CallMode callModeFor(CallType callType) - { - switch (callType) { - case Call: - case CallVarargs: - case DirectCall: - return CallMode::Regular; - case TailCall: - case TailCallVarargs: - case DirectTailCall: - return CallMode::Tail; - case Construct: - case ConstructVarargs: - case DirectConstruct: - return CallMode::Construct; - case None: - RELEASE_ASSERT_NOT_REACHED(); - } - - RELEASE_ASSERT_NOT_REACHED(); - } - - static bool isDirect(CallType callType) - { - switch (callType) { - case DirectCall: - case DirectTailCall: - case DirectConstruct: - return true; - case Call: - case CallVarargs: - case TailCall: - case TailCallVarargs: - case Construct: - case ConstructVarargs: - return false; - case None: - RELEASE_ASSERT_NOT_REACHED(); - return false; - } - RELEASE_ASSERT_NOT_REACHED(); - return false; - } - CallMode callMode() const { return callModeFor(static_cast(m_callType)); } - bool isDirect() const - { - return isDirect(static_cast(m_callType)); - } - bool isTailCall() const { return callMode() == CallMode::Tail; @@ -177,22 +124,23 @@ class CallLinkInfo : public PackedRawSentinelNode { return isVarargsCallType(static_cast(m_callType)); } - bool isLinked() const { return stub() || m_calleeOrCodeBlock; } - void unlink(VM&); - - enum class UseDataIC : bool { No, Yes }; + 
bool isLinked() const { return mode() != Mode::Init && mode() != Mode::Virtual; } + void unlinkOrUpgradeImpl(VM&, CodeBlock* oldCodeBlock, CodeBlock* newCodeBlock); #if ENABLE(JIT) protected: - static MacroAssembler::JumpList emitFastPathImpl(CallLinkInfo*, CCallHelpers&, GPRReg calleeGPR, GPRReg callLinkInfoGPR, UseDataIC, bool isTailCall, ScopedLambda&& prepareForTailCall) WARN_UNUSED_RETURN; - static MacroAssembler::JumpList emitDataICFastPath(CCallHelpers&, GPRReg calleeGPR, GPRReg callLinkInfoGPR) WARN_UNUSED_RETURN; - static void emitDataICSlowPath(VM&, CCallHelpers&, GPRReg callLinkInfoGPR); - static MacroAssembler::JumpList emitTailCallDataICFastPath(CCallHelpers&, GPRReg calleeGPR, GPRReg callLinkInfoGPR, ScopedLambda&& prepareForTailCall) WARN_UNUSED_RETURN; + static std::tuple emitFastPathImpl(CallLinkInfo*, CCallHelpers&, UseDataIC, bool isTailCall, ScopedLambda&& prepareForTailCall) WARN_UNUSED_RETURN; + static std::tuple emitDataICFastPath(CCallHelpers&) WARN_UNUSED_RETURN; + static std::tuple emitTailCallDataICFastPath(CCallHelpers&, ScopedLambda&& prepareForTailCall) WARN_UNUSED_RETURN; + + static void emitSlowPathImpl(VM&, CCallHelpers&, UseDataIC, bool isTailCall, MacroAssembler::Label); + static void emitDataICSlowPath(VM&, CCallHelpers&, bool isTailCall, MacroAssembler::Label); public: - static MacroAssembler::JumpList emitFastPath(CCallHelpers&, CompileTimeCallLinkInfo, GPRReg calleeGPR, GPRReg callLinkInfoGPR) WARN_UNUSED_RETURN; - static MacroAssembler::JumpList emitTailCallFastPath(CCallHelpers&, CompileTimeCallLinkInfo, GPRReg calleeGPR, GPRReg callLinkInfoGPR, ScopedLambda&& prepareForTailCall) WARN_UNUSED_RETURN; - static void emitSlowPath(VM&, CCallHelpers&, CompileTimeCallLinkInfo, GPRReg callLinkInfoGPR); + static std::tuple emitFastPath(CCallHelpers&, CompileTimeCallLinkInfo) WARN_UNUSED_RETURN; + static std::tuple emitTailCallFastPath(CCallHelpers&, CompileTimeCallLinkInfo, ScopedLambda&& prepareForTailCall) WARN_UNUSED_RETURN; + static void emitSlowPath(VM&, CCallHelpers&, CompileTimeCallLinkInfo); + static void emitTailCallSlowPath(VM&, CCallHelpers&, CompileTimeCallLinkInfo, MacroAssembler::Label); #endif void revertCallToStub(); @@ -210,16 +158,10 @@ class CallLinkInfo : public PackedRawSentinelNode { CodeLocationLabel doneLocation(); void setMonomorphicCallee(VM&, JSCell*, JSObject* callee, CodeBlock*, CodePtr); - void setSlowPathCallDestination(CodePtr); void clearCallee(); JSObject* callee(); - void setCodeBlock(VM&, JSCell*, FunctionCodeBlock*); - void clearCodeBlock(); - FunctionCodeBlock* codeBlock(); - void setLastSeenCallee(VM&, const JSCell* owner, JSObject* callee); - void clearLastSeenCallee(); JSObject* lastSeenCallee() const; bool haveLastSeenCallee() const; @@ -231,6 +173,10 @@ class CallLinkInfo : public PackedRawSentinelNode { #endif void clearStub(); + void setVirtualCall(VM&); + + void revertCall(VM&); + PolymorphicCallStubRoutine* stub() const { #if ENABLE(JIT) @@ -313,7 +259,7 @@ class CallLinkInfo : public PackedRawSentinelNode { static ptrdiff_t offsetOfCallee() { - return OBJECT_OFFSETOF(CallLinkInfo, m_calleeOrCodeBlock); + return OBJECT_OFFSETOF(CallLinkInfo, m_callee); } static ptrdiff_t offsetOfCodeBlock() @@ -326,14 +272,11 @@ class CallLinkInfo : public PackedRawSentinelNode { return OBJECT_OFFSETOF(CallLinkInfo, u) + OBJECT_OFFSETOF(UnionType, dataIC.m_monomorphicCallDestination); } - static ptrdiff_t offsetOfSlowPathCallDestination() +#if ENABLE(JIT) + static ptrdiff_t offsetOfStub() { - return OBJECT_OFFSETOF(CallLinkInfo, 
m_slowPathCallDestination); + return OBJECT_OFFSETOF(CallLinkInfo, m_stub); } - -#if ENABLE(JIT) - GPRReg calleeGPR() const; - GPRReg callLinkInfoGPR() const; #endif uint32_t slowPathCount() @@ -353,13 +296,10 @@ class CallLinkInfo : public PackedRawSentinelNode { #else RELEASE_ASSERT_NOT_REACHED(); #endif - } else { - functor(m_calleeOrCodeBlock.get()); - if (isDirect()) - functor(m_lastSeenCalleeOrExecutable.get()); - } + } else + functor(m_callee.get()); } - if (!isDirect() && haveLastSeenCallee()) + if (haveLastSeenCallee()) functor(lastSeenCallee()); } @@ -367,31 +307,49 @@ class CallLinkInfo : public PackedRawSentinelNode { Type type() const { return static_cast(m_type); } + Mode mode() const { return static_cast(m_mode); } + + JSCell* owner() const { return m_owner; } + + JSCell* ownerForSlowPath(CallFrame* calleeFrame); + + JSGlobalObject* globalObjectForSlowPath(JSCell* owner); + + std::tuple retrieveCaller(JSCell* owner); + protected: - CallLinkInfo(Type type, UseDataIC useDataIC) - : m_hasSeenShouldRepatch(false) - , m_hasSeenClosure(false) - , m_clearedByGC(false) - , m_clearedByVirtual(false) - , m_allowStubs(true) - , m_callType(None) + CallLinkInfo(Type type, UseDataIC useDataIC, JSCell* owner) + : CallLinkInfoBase(CallSiteType::CallLinkInfo) , m_useDataIC(static_cast(useDataIC)) , m_type(static_cast(type)) + , m_owner(owner) { ASSERT(type == this->type()); ASSERT(useDataIC == this->useDataIC()); } + void reset(VM&); + + bool m_hasSeenShouldRepatch : 1 { false }; + bool m_hasSeenClosure : 1 { false }; + bool m_clearedByGC : 1 { false }; + bool m_clearedByVirtual : 1 { false }; + bool m_allowStubs : 1 { true }; + unsigned m_callType : 4 { CallType::None }; // CallType + unsigned m_useDataIC : 1; // UseDataIC + unsigned m_type : 1; // Type + unsigned m_mode : 3 { static_cast(Mode::Init) }; // Mode + uint8_t m_maxArgumentCountIncludingThisForVarargs { 0 }; // For varargs: the profiled maximum number of arguments. For direct: the number of stack slots allocated for arguments. uint32_t m_slowPathCount { 0 }; + CodeLocationLabel m_doneLocation; - CodePtr m_slowPathCallDestination; union UnionType { UnionType() : dataIC { nullptr, nullptr } { } struct DataIC { - CodeBlock* m_codeBlock; // This is weekly held. And cleared whenever m_monomorphicCallDestination is changed. + CodeBlock* m_codeBlock; // This is weakly held. And cleared whenever m_monomorphicCallDestination is changed. CodePtr m_monomorphicCallDestination; } dataIC; @@ -401,30 +359,22 @@ class CallLinkInfo : public PackedRawSentinelNode { } codeIC; } u; - WriteBarrier m_calleeOrCodeBlock; - WriteBarrier m_lastSeenCalleeOrExecutable; + WriteBarrier m_callee; + WriteBarrier m_lastSeenCallee; #if ENABLE(JIT) RefPtr m_stub; #endif - bool m_hasSeenShouldRepatch : 1; - bool m_hasSeenClosure : 1; - bool m_clearedByGC : 1; - bool m_clearedByVirtual : 1; - bool m_allowStubs : 1; - unsigned m_callType : 4; // CallType - unsigned m_useDataIC : 1; // UseDataIC - unsigned m_type : 1; // Type - uint8_t m_maxArgumentCountIncludingThisForVarargs { 0 }; // For varargs: the profiled maximum number of arguments. For direct: the number of stack slots allocated for arguments. 
+ JSCell* m_owner { nullptr }; }; class BaselineCallLinkInfo final : public CallLinkInfo { public: BaselineCallLinkInfo() - : CallLinkInfo(Type::Baseline, UseDataIC::Yes) + : CallLinkInfo(Type::Baseline, UseDataIC::Yes, nullptr) { } - void initialize(VM&, CallType, BytecodeIndex); + void initialize(VM&, CodeBlock*, CallType, BytecodeIndex); void setCodeLocations(CodeLocationLabel doneLocation) { @@ -433,11 +383,6 @@ class BaselineCallLinkInfo final : public CallLinkInfo { CodeOrigin codeOrigin() const { return CodeOrigin { m_bytecodeIndex }; } -#if ENABLE(JIT) - static constexpr GPRReg calleeGPR() { return BaselineJITRegisters::Call::calleeGPR; } - static constexpr GPRReg callLinkInfoGPR() { return BaselineJITRegisters::Call::callLinkInfoGPR; } -#endif - private: BytecodeIndex m_bytecodeIndex { }; }; @@ -450,114 +395,161 @@ inline CodeOrigin getCallLinkInfoCodeOrigin(CallLinkInfo& callLinkInfo) struct UnlinkedCallLinkInfo { CodeLocationLabel doneLocation; - void setCodeLocations(CodeLocationLabel, CodeLocationLabel doneLocation) + void setCodeLocations(CodeLocationLabel doneLocation) { this->doneLocation = doneLocation; } }; -struct BaselineUnlinkedCallLinkInfo : public UnlinkedCallLinkInfo { +struct BaselineUnlinkedCallLinkInfo : public JSC::UnlinkedCallLinkInfo { BytecodeIndex bytecodeIndex; // Currently, only used by baseline, so this can trivially produce a CodeOrigin. #if ENABLE(JIT) - void setUpCall(CallLinkInfo::CallType, GPRReg) { } + void setUpCall(CallLinkInfo::CallType) { } #endif - void setFrameShuffleData(const CallFrameShuffleData&) { } }; #if ENABLE(JIT) -class OptimizingCallLinkInfo final : public CallLinkInfo { +class DirectCallLinkInfo final : public CallLinkInfoBase { + WTF_MAKE_NONCOPYABLE(DirectCallLinkInfo); public: - friend class CallLinkInfo; + DirectCallLinkInfo(CodeOrigin codeOrigin, UseDataIC useDataIC, JSCell* owner, ExecutableBase* executable) + : CallLinkInfoBase(CallSiteType::DirectCall) + , m_useDataIC(useDataIC) + , m_codeOrigin(codeOrigin) + , m_owner(owner) + , m_executable(executable) + { } - OptimizingCallLinkInfo() - : CallLinkInfo(Type::Optimizing, UseDataIC::Yes) + ~DirectCallLinkInfo() { + m_target = { }; + m_codeBlock = nullptr; } - OptimizingCallLinkInfo(CodeOrigin codeOrigin, UseDataIC useDataIC) - : CallLinkInfo(Type::Optimizing, useDataIC) - , m_codeOrigin(codeOrigin) + void setCallType(CallType callType) { + m_callType = callType; } - void setUpCall(CallType callType, GPRReg calleeGPR) + CallType callType() { - m_callType = callType; - m_calleeGPR = calleeGPR; + return static_cast(m_callType); } - void setCodeLocations( - CodeLocationLabel slowPathStart, - CodeLocationLabel doneLocation) + CallMode callMode() const { - if (!isDataIC()) - m_slowPathStart = slowPathStart; - m_doneLocation = doneLocation; + return callModeFor(static_cast(m_callType)); } - CodeLocationLabel fastPathStart(); - CodeLocationLabel slowPathStart(); - - GPRReg calleeGPR() const { return m_calleeGPR; } - GPRReg callLinkInfoGPR() const { return m_callLinkInfoGPR; } - void setCallLinkInfoGPR(GPRReg callLinkInfoGPR) { m_callLinkInfoGPR = callLinkInfoGPR; } - - void emitDirectFastPath(CCallHelpers&); - void emitDirectTailCallFastPath(CCallHelpers&, ScopedLambda&& prepareForTailCall); - void initializeDirectCall(); - void setDirectCallTarget(CodeBlock*, CodeLocationLabel); - void setDirectCallMaxArgumentCountIncludingThis(unsigned); - unsigned maxArgumentCountIncludingThisForDirectCall() const { return m_maxArgumentCountIncludingThisForDirectCall; } - void emitSlowPath(VM&, 
CCallHelpers&); + bool isTailCall() const + { + return callMode() == CallMode::Tail; + } - void setFrameShuffleData(const CallFrameShuffleData&); + CodeSpecializationKind specializationKind() const + { + auto callType = static_cast(m_callType); + return specializationFromIsConstruct(callType == DirectConstruct); + } - const CallFrameShuffleData* frameShuffleData() + void setCodeLocations(CodeLocationLabel slowPathStart) { - return m_frameShuffleData.get(); + m_slowPathStart = slowPathStart; } + static ptrdiff_t offsetOfTarget() { return OBJECT_OFFSETOF(DirectCallLinkInfo, m_target); }; + static ptrdiff_t offsetOfCodeBlock() { return OBJECT_OFFSETOF(DirectCallLinkInfo, m_codeBlock); }; + + JSCell* owner() const { return m_owner; } + + void unlinkOrUpgradeImpl(VM&, CodeBlock* oldCodeBlock, CodeBlock* newCodeBlock); + + void visitWeak(VM&); + CodeOrigin codeOrigin() const { return m_codeOrigin; } + bool isDataIC() const { return m_useDataIC == UseDataIC::Yes; } - void initializeFromDFGUnlinkedCallLinkInfo(VM&, const DFG::UnlinkedCallLinkInfo&); + MacroAssembler::JumpList emitDirectFastPath(CCallHelpers&); + MacroAssembler::JumpList emitDirectTailCallFastPath(CCallHelpers&, ScopedLambda&& prepareForTailCall); + void setCallTarget(CodeBlock*, CodeLocationLabel); + void setMaxArgumentCountIncludingThis(unsigned); + unsigned maxArgumentCountIncludingThis() const { return m_maxArgumentCountIncludingThis; } + + void reset(); + + void validateSpeculativeRepatchOnMainThread(VM&); private: - MacroAssembler::JumpList emitFastPath(CCallHelpers&, GPRReg calleeGPR, GPRReg callLinkInfoGPR) WARN_UNUSED_RETURN; - MacroAssembler::JumpList emitTailCallFastPath(CCallHelpers&, GPRReg calleeGPR, GPRReg callLinkInfoGPR, ScopedLambda&& prepareForTailCall) WARN_UNUSED_RETURN; + CodeLocationLabel slowPathStart() const { return m_slowPathStart; } + CodeLocationLabel fastPathStart() const { return m_fastPathStart; } - CodeOrigin m_codeOrigin; - CodeLocationNearCall m_callLocation NO_UNIQUE_ADDRESS; - GPRReg m_calleeGPR { InvalidGPRReg }; - GPRReg m_callLinkInfoGPR { InvalidGPRReg }; - unsigned m_maxArgumentCountIncludingThisForDirectCall { 0 }; + void initialize(); + void repatchSpeculatively(); + + std::tuple> retrieveCallInfo(FunctionExecutable*); + + CallType m_callType : 4; + UseDataIC m_useDataIC : 1; + unsigned m_maxArgumentCountIncludingThis { 0 }; + CodePtr m_target; + CodeBlock* m_codeBlock { nullptr }; // This is weakly held. And cleared whenever m_target is changed. + CodeOrigin m_codeOrigin { }; CodeLocationLabel m_slowPathStart; CodeLocationLabel m_fastPathStart; - std::unique_ptr m_frameShuffleData; + CodeLocationDataLabelPtr m_codeBlockLocation; + CodeLocationNearCall m_callLocation NO_UNIQUE_ADDRESS; + JSCell* m_owner; + ExecutableBase* m_executable { nullptr }; // This is weakly held. DFG / FTL CommonData already ensures this. 
}; -inline GPRReg CallLinkInfo::calleeGPR() -{ - switch (type()) { - case Type::Baseline: - return static_cast(this)->calleeGPR(); - case Type::Optimizing: - return static_cast(this)->calleeGPR(); +class OptimizingCallLinkInfo final : public CallLinkInfo { +public: + friend class CallLinkInfo; + + OptimizingCallLinkInfo() + : CallLinkInfo(Type::Optimizing, UseDataIC::Yes, nullptr) + { } - return InvalidGPRReg; -} -inline GPRReg CallLinkInfo::callLinkInfoGPR() -{ - switch (type()) { - case Type::Baseline: - return static_cast(this)->callLinkInfoGPR(); - case Type::Optimizing: - return static_cast(this)->callLinkInfoGPR(); + OptimizingCallLinkInfo(CodeOrigin codeOrigin, UseDataIC useDataIC, JSCell* owner) + : CallLinkInfo(Type::Optimizing, useDataIC, owner) + , m_codeOrigin(codeOrigin) + { } - return InvalidGPRReg; -} + + void setUpCall(CallType callType) + { + m_callType = callType; + } + + void setCodeLocations(CodeLocationLabel doneLocation) + { + m_doneLocation = doneLocation; + } + + void setSlowPathCallDestination(CodePtr); + + CodeOrigin codeOrigin() const { return m_codeOrigin; } + + void initializeFromDFGUnlinkedCallLinkInfo(VM&, const DFG::UnlinkedCallLinkInfo&, CodeBlock*); + + static ptrdiff_t offsetOfSlowPathCallDestination() + { + return OBJECT_OFFSETOF(OptimizingCallLinkInfo, m_slowPathCallDestination); + } + +private: + std::tuple emitFastPath(CCallHelpers&) WARN_UNUSED_RETURN; + std::tuple emitTailCallFastPath(CCallHelpers&, ScopedLambda&& prepareForTailCall) WARN_UNUSED_RETURN; + void emitSlowPath(VM&, CCallHelpers&); + void emitTailCallSlowPath(VM&, CCallHelpers&, MacroAssembler::Label); + + CodeOrigin m_codeOrigin; + CodePtr m_slowPathCallDestination; + CodeLocationNearCall m_callLocation NO_UNIQUE_ADDRESS; +}; #endif @@ -576,4 +568,16 @@ inline CodeOrigin CallLinkInfo::codeOrigin() const return { }; } +inline JSCell* CallLinkInfo::ownerForSlowPath(CallFrame* calleeFrame) +{ + if (m_owner) + return m_owner; + + // Right now, IC (Getter, Setter, Proxy IC etc.) / WasmToJS sets nullptr intentionally since we would like to share IC / WasmToJS thunk eventually. + // However, in that case, each IC's data side will have CallLinkInfo. + // At that time, they should have appropriate owner. So this is a hack only for now. + // This should always work since IC only performs regular-calls and it never does tail-calls. + return calleeFrame->callerFrame()->codeOwnerCell(); +} + } // namespace JSC diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkInfoBase.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkInfoBase.cpp new file mode 100644 index 00000000..bdffce46 --- /dev/null +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkInfoBase.cpp @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2023 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC.
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "CallLinkInfoBase.h" + +#include "CachedCall.h" +#include "CallLinkInfo.h" +#include "JSCJSValueInlines.h" +#include "PolymorphicCallStubRoutine.h" + +namespace JSC { + +void CallLinkInfoBase::unlinkOrUpgrade(VM& vm, CodeBlock* oldCodeBlock, CodeBlock* newCodeBlock) +{ + switch (callSiteType()) { + case CallSiteType::CallLinkInfo: + static_cast(this)->unlinkOrUpgradeImpl(vm, oldCodeBlock, newCodeBlock); + break; +#if ENABLE(JIT) + case CallSiteType::PolymorphicCallNode: + static_cast(this)->unlinkOrUpgradeImpl(vm, oldCodeBlock, newCodeBlock); + break; + case CallSiteType::DirectCall: + static_cast(this)->unlinkOrUpgradeImpl(vm, oldCodeBlock, newCodeBlock); + break; +#endif + case CallSiteType::CachedCall: + static_cast(this)->unlinkOrUpgradeImpl(vm, oldCodeBlock, newCodeBlock); + break; + } +} + +} // namespace JSC diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkInfoBase.h b/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkInfoBase.h new file mode 100644 index 00000000..70292443 --- /dev/null +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkInfoBase.h @@ -0,0 +1,123 @@ +/* + * Copyright (C) 2023 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#pragma once + +#include "ArityCheckMode.h" +#include "CallMode.h" +#include "JSCPtrTag.h" +#include +#include + +namespace JSC { + +class CodeBlock; +class JSCell; +class VM; + +class CallSlot { +public: + JSCell* m_calleeOrExecutable { nullptr }; + uint32_t m_count { 0 }; + uint8_t m_index { 0 }; + ArityCheckMode m_arityCheckMode { MustCheckArity }; + CodePtr m_target; + CodeBlock* m_codeBlock { nullptr }; // This is weakly held. And cleared whenever m_target is changed. + + static ptrdiff_t offsetOfCalleeOrExecutable() { return OBJECT_OFFSETOF(CallSlot, m_calleeOrExecutable); } + static ptrdiff_t offsetOfCount() { return OBJECT_OFFSETOF(CallSlot, m_count); } + static ptrdiff_t offsetOfTarget() { return OBJECT_OFFSETOF(CallSlot, m_target); } + static ptrdiff_t offsetOfCodeBlock() { return OBJECT_OFFSETOF(CallSlot, m_codeBlock); } +}; +static_assert(sizeof(CallSlot) <= 32, "This should be small enough to keep iteration of vector in polymorphic call fast"); + +class CallLinkInfoBase : public BasicRawSentinelNode { +public: + enum class CallSiteType : uint8_t { + CallLinkInfo, +#if ENABLE(JIT) + PolymorphicCallNode, + DirectCall, +#endif + CachedCall, + }; + + enum CallType : uint8_t { + None, + Call, + CallVarargs, + Construct, + ConstructVarargs, + TailCall, + TailCallVarargs, + DirectCall, + DirectConstruct, + DirectTailCall + }; + + enum class UseDataIC : bool { No, Yes }; + + static CallMode callModeFor(CallType callType) + { + switch (callType) { + case Call: + case CallVarargs: + case DirectCall: + return CallMode::Regular; + case TailCall: + case TailCallVarargs: + case DirectTailCall: + return CallMode::Tail; + case Construct: + case ConstructVarargs: + case DirectConstruct: + return CallMode::Construct; + case None: + RELEASE_ASSERT_NOT_REACHED(); + } + + RELEASE_ASSERT_NOT_REACHED(); + } + + explicit CallLinkInfoBase(CallSiteType callSiteType) + : m_callSiteType(callSiteType) + { + } + + ~CallLinkInfoBase() + { + if (isOnList()) + remove(); + } + + CallSiteType callSiteType() const { return m_callSiteType; } + + void unlinkOrUpgrade(VM&, CodeBlock*, CodeBlock*); + +private: + CallSiteType m_callSiteType; +}; + +} // namespace JSC diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp index c0682e02..3c629f7a 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2019 Apple Inc. All rights reserved. + * Copyright (C) 2012-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -32,6 +32,7 @@ #include "JSCInlines.h" #include #include +#include namespace JSC { @@ -41,6 +42,8 @@ static constexpr bool verbose = false; } #endif +WTF_MAKE_TZONE_ALLOCATED_IMPL(CallLinkStatus); + CallLinkStatus::CallLinkStatus(JSValue value) : m_couldTakeSlowPath(false) , m_isProved(false) @@ -137,16 +140,12 @@ CallLinkStatus CallLinkStatus::computeFor( CallLinkStatus CallLinkStatus::computeFromCallLinkInfo( const ConcurrentJSLocker&, CallLinkInfo& callLinkInfo) { - // We cannot tell you anything about direct calls. 
- if (callLinkInfo.isDirect()) - return CallLinkStatus(); - if (callLinkInfo.clearedByGC() || callLinkInfo.clearedByVirtual()) return takesSlowPath(); // Note that despite requiring that the locker is held, this code is racy with respect // to the CallLinkInfo: it may get cleared while this code runs! This is because - // CallLinkInfo::unlink() may be called from a different CodeBlock than the one that owns + // CallLinkInfoBase::unlinkOrUpgrade() may be called from a different CodeBlock than the one that owns // the CallLinkInfo and currently we save space by not having CallLinkInfos know who owns // them. So, there is no way for either the caller of CallLinkInfo::unlock() or unlock() // itself to figure out which lock to lock. diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkStatus.h b/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkStatus.h index adeaa9ba..8b6f73ff 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkStatus.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/CallLinkStatus.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2018 Apple Inc. All rights reserved. + * Copyright (C) 2012-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -32,6 +32,7 @@ #include "ExitFlag.h" #include "ICStatusMap.h" #include "JSCJSValue.h" +#include namespace JSC { @@ -42,7 +43,7 @@ class Structure; class CallLinkInfo; class CallLinkStatus final { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(CallLinkStatus); public: CallLinkStatus() { diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/CallVariant.h b/vendor/webkit/Source/JavaScriptCore/bytecode/CallVariant.h index 853199dd..495f4ae2 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/CallVariant.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/CallVariant.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2014-2019 Apple Inc. All rights reserved. + * Copyright (C) 2014-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,6 +30,7 @@ #include "JSCast.h" #include "JSFunction.h" #include "NativeExecutable.h" +#include namespace JSC { @@ -61,7 +62,7 @@ namespace JSC { // cannot use WriteBarrier<> here because this gets used inside the compiler. class CallVariant { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(CallVariant); public: explicit CallVariant(JSCell* callee = nullptr) : m_callee(callee) @@ -146,7 +147,7 @@ class CallVariant { return m_callee == deletedToken(); } - friend bool operator==(CallVariant, CallVariant) = default; + friend bool operator==(const CallVariant&, const CallVariant&) = default; bool operator<(const CallVariant& other) const { diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/CheckPrivateBrandStatus.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/CheckPrivateBrandStatus.cpp index 9c54d174..781c372d 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/CheckPrivateBrandStatus.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/CheckPrivateBrandStatus.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021 Apple Inc. All rights reserved. + * Copyright (C) 2021-2023 Apple Inc. All rights reserved. * Copyright (C) 2021 Igalia S.A. All rights reserved.
* * Redistribution and use in source and binary forms, with or without @@ -33,9 +33,12 @@ #include "InlineCacheCompiler.h" #include "StructureStubInfo.h" #include +#include namespace JSC { +WTF_MAKE_TZONE_ALLOCATED_IMPL(CheckPrivateBrandStatus); + bool CheckPrivateBrandStatus::appendVariant(const CheckPrivateBrandVariant& variant) { return appendICStatusVariant(m_variants, variant); diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/CheckPrivateBrandStatus.h b/vendor/webkit/Source/JavaScriptCore/bytecode/CheckPrivateBrandStatus.h index a988f2af..e63aba06 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/CheckPrivateBrandStatus.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/CheckPrivateBrandStatus.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021 Apple Inc. All rights reserved. + * Copyright (C) 2021-2023 Apple Inc. All rights reserved. * Copyright (C) 2021 Igalia S.A. All rights reserved. * * @@ -34,6 +34,7 @@ #include "ExitFlag.h" #include "ICStatusMap.h" #include "StubInfoSummary.h" +#include namespace JSC { @@ -42,7 +43,7 @@ class CodeBlock; class StructureStubInfo; class CheckPrivateBrandStatus final { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(CheckPrivateBrandStatus); public: enum State : uint8_t { // It's uncached so we have no information. diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/CheckPrivateBrandVariant.h b/vendor/webkit/Source/JavaScriptCore/bytecode/CheckPrivateBrandVariant.h index 2b914c2d..a9f46906 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/CheckPrivateBrandVariant.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/CheckPrivateBrandVariant.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021 Apple Inc. All rights reserved. + * Copyright (C) 2021-2023 Apple Inc. All rights reserved. * Copyright (C) 2021 Igalia S.A. All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -29,13 +29,14 @@ #include "CacheableIdentifier.h" #include "StructureSet.h" #include +#include namespace JSC { class CheckPrivateBrandStatus; class CheckPrivateBrandVariant { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(CheckPrivateBrandVariant); public: CheckPrivateBrandVariant(CacheableIdentifier, const StructureSet& = StructureSet()); diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/CodeBlock.cpp index da80f4ce..b48ac31e 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/CodeBlock.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/CodeBlock.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008-2023 Apple Inc. All rights reserved. + * Copyright (C) 2008-2024 Apple Inc. All rights reserved. 
* Copyright (C) 2008 Cameron Zwarich * * Redistribution and use in source and binary forms, with or without @@ -196,6 +196,11 @@ void CodeBlock::dump(PrintStream& out) const dumpAssumingJITType(out, jitType()); } +void CodeBlock::dumpSimpleName(PrintStream& out) const +{ + out.print(inferredName(), "#", hashAsStringIfPossible()); +} + void CodeBlock::dumpSource() { dumpSource(WTF::dataFile()); @@ -436,7 +441,7 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink }; auto link_callLinkInfo = [&](const auto& instruction, auto bytecode, auto& metadata) { - metadata.m_callLinkInfo.initialize(vm, CallLinkInfo::callTypeFor(decltype(bytecode)::opcodeID), instruction.index()); + metadata.m_callLinkInfo.initialize(vm, this, CallLinkInfo::callTypeFor(decltype(bytecode)::opcodeID), instruction.index()); }; #define LINK_FIELD(__field) \ @@ -735,7 +740,7 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink dumpBytecode(); if (m_metadata) - vm.heap.reportExtraMemoryAllocated(m_metadata->sizeInBytesForGC()); + vm.heap.reportExtraMemoryAllocated(this, m_metadata->sizeInBytesForGC()); initializeTemplateObjects(topLevelExecutable, templateObjectIndices); RETURN_IF_EXCEPTION(throwScope, false); @@ -760,33 +765,30 @@ void CodeBlock::setupWithUnlinkedBaselineCode(Ref jitCode) { ConcurrentJSLocker locker(m_lock); ASSERT(!m_jitData); - auto baselineJITData = BaselineJITData::create(jitCode->m_constantPool.size(), m_globalObject.get()); - baselineJITData->m_stubInfos = FixedVector(jitCode->m_unlinkedStubInfos.size()); + auto baselineJITData = BaselineJITData::create(jitCode->m_unlinkedStubInfos.size(), jitCode->m_constantPool.size(), this); for (auto& unlinkedCallLinkInfo : jitCode->m_unlinkedCalls) { CallLinkInfo* callLinkInfo = getCallLinkInfoForBytecodeIndex(locker, unlinkedCallLinkInfo.bytecodeIndex); ASSERT(callLinkInfo); static_cast(callLinkInfo)->setCodeLocations(unlinkedCallLinkInfo.doneLocation); } + for (unsigned index = 0; index < jitCode->m_unlinkedStubInfos.size(); ++index) { + BaselineUnlinkedStructureStubInfo& unlinkedStubInfo = jitCode->m_unlinkedStubInfos[index]; + auto& stubInfo = baselineJITData->stubInfo(index); + stubInfo.initializeFromUnlinkedStructureStubInfo(vm(), unlinkedStubInfo); + } + for (size_t i = 0; i < jitCode->m_constantPool.size(); ++i) { auto entry = jitCode->m_constantPool.at(i); switch (entry.type()) { - case JITConstantPool::Type::StructureStubInfo: { - unsigned index = bitwise_cast(entry.pointer()); - BaselineUnlinkedStructureStubInfo& unlinkedStubInfo = jitCode->m_unlinkedStubInfos[index]; - StructureStubInfo& stubInfo = baselineJITData->m_stubInfos[index]; - stubInfo.initializeFromUnlinkedStructureStubInfo(unlinkedStubInfo); - baselineJITData->at(i) = &stubInfo; - break; - } case JITConstantPool::Type::FunctionDecl: { unsigned index = bitwise_cast(entry.pointer()); - baselineJITData->at(i) = functionDecl(index); + baselineJITData->trailingSpan()[i] = functionDecl(index); break; } case JITConstantPool::Type::FunctionExpr: { unsigned index = bitwise_cast(entry.pointer()); - baselineJITData->at(i) = functionExpr(index); + baselineJITData->trailingSpan()[i] = functionExpr(index); break; } } @@ -816,12 +818,6 @@ CodeBlock::~CodeBlock() { VM& vm = *m_vm; - // We use unvalidatedGet because get() has a validation assertion that rejects access. - // This assertion is correct since destruction order of cells is not guaranteed, and member cells could already be destroyed. 
- // But for CodeBlock, we are ensuring the order: CodeBlock gets destroyed before UnlinkedCodeBlock gets destroyed. - // So, we can access member UnlinkedCodeBlock safely here. We bypass the assertion by using unvalidatedGet. - UnlinkedCodeBlock* unlinkedCodeBlock = m_unlinkedCode.unvalidatedGet(); - if (JITCode::isBaselineCode(jitType())) { if (m_metadata) { m_metadata->forEach([&](auto& metadata) { @@ -848,16 +844,19 @@ CodeBlock::~CodeBlock() // to the CodeBlock. However, its lifecycle is tied directly to the CodeBlock, and // will be automatically cleared when the CodeBlock destructs. - if (JITCode::isOptimizingJIT(jitType())) + if (JSC::JITCode::isOptimizingJIT(jitType())) jitCode()->dfgCommon()->clearWatchpoints(); #endif - vm.heap.codeBlockSet().remove(this); - + if (UNLIKELY(vm.m_perBytecodeProfiler)) vm.m_perBytecodeProfiler->notifyDestruction(this); - if (!vm.heap.isShuttingDown() && unlinkedCodeBlock->didOptimize() == TriState::Indeterminate) - unlinkedCodeBlock->setDidOptimize(TriState::False); + if (LIKELY(!vm.heap.isShuttingDown())) { + if (m_metadata) { + if (m_metadata->unlinkedMetadata().didOptimize() == TriState::Indeterminate) + m_metadata->unlinkedMetadata().setDidOptimize(TriState::False); + } + } #if ENABLE(VERBOSE_VALUE_PROFILE) dumpValueProfiles(); @@ -869,7 +868,7 @@ CodeBlock::~CodeBlock() // So, if we don't remove incoming calls, and get destroyed before the // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's // destructor will try to remove nodes from our (no longer valid) linked list. - unlinkIncomingCalls(); + unlinkOrUpgradeIncomingCalls(vm, nullptr); // Note that our outgoing calls will be removed from other CodeBlocks' // m_incomingCalls linked lists through the execution of the ~CallLinkInfo @@ -881,7 +880,7 @@ CodeBlock::~CodeBlock() stubInfo.deref(); return IterationStatus::Continue; }); - if (JITCode::isOptimizingJIT(jitType())) { + if (JSC::JITCode::isOptimizingJIT(jitType())) { #if ENABLE(DFG_JIT) if (auto* jitData = dfgJITData()) delete jitData; @@ -996,7 +995,7 @@ CodeBlock* CodeBlock::specialOSREntryBlockOrNull() #if ENABLE(FTL_JIT) if (jitType() != JITType::DFGJIT) return nullptr; - DFG::JITCode* jitCode = m_jitCode->dfg(); + auto* jitCode = m_jitCode->dfg(); return jitCode->osrEntryBlock(); #else // ENABLE(FTL_JIT) return 0; @@ -1009,7 +1008,7 @@ size_t CodeBlock::estimatedSize(JSCell* cell, VM& vm) size_t extraMemoryAllocated = 0; if (thisObject->m_metadata) extraMemoryAllocated += thisObject->m_metadata->sizeInBytesForGC(); - RefPtr jitCode = thisObject->m_jitCode; + RefPtr jitCode = thisObject->m_jitCode; if (jitCode && !jitCode->isShared()) extraMemoryAllocated += jitCode->size(); return Base::estimatedSize(cell, vm) + extraMemoryAllocated; @@ -1020,23 +1019,25 @@ inline void CodeBlock::forEachStructureStubInfo(Func func) { UNUSED_PARAM(func); #if ENABLE(JIT) - if (JITCode::isOptimizingJIT(jitType())) { + if (JSC::JITCode::isOptimizingJIT(jitType())) { #if ENABLE(DFG_JIT) for (auto* stubInfo : jitCode()->dfgCommon()->m_stubInfos) { if (func(*stubInfo) == IterationStatus::Done) return; } if (auto* jitData = dfgJITData()) { - for (auto& stubInfo : jitData->stubInfos()) + for (auto& stubInfo : jitData->stubInfos()) { if (func(stubInfo) == IterationStatus::Done) return; + } } #endif } else { if (auto* jitData = baselineJITData()) { - for (auto& stubInfo : jitData->m_stubInfos) + for (auto& stubInfo : jitData->stubInfos()) { if (func(stubInfo) == IterationStatus::Done) return; + } } } #endif // ENABLE(JIT) @@ -1098,7 +1099,7 
@@ bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker, Visitor& v // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when // their weak references go stale. So if a baseline JIT CodeBlock gets // scanned, we can assume that this means that it's live. - if (!JITCode::isOptimizingJIT(jitType())) + if (!JSC::JITCode::isOptimizingJIT(jitType())) return true; return false; @@ -1109,7 +1110,7 @@ template bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker&, SlotVisi bool CodeBlock::shouldJettisonDueToWeakReference(VM& vm) { - if (!JITCode::isOptimizingJIT(jitType())) + if (!JSC::JITCode::isOptimizingJIT(jitType())) return false; return !vm.heap.isMarked(this); } @@ -1251,7 +1252,7 @@ void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, Visitor& visitor #endif // ENABLE(JIT) #if ENABLE(DFG_JIT) - if (JITCode::isOptimizingJIT(jitType())) { + if (JSC::JITCode::isOptimizingJIT(jitType())) { DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); dfgCommon->recordedStatuses.markIfCheap(visitor); @@ -1304,7 +1305,7 @@ void CodeBlock::determineLiveness(const ConcurrentJSLocker&, Visitor& visitor) // In rare and weird cases, this could be called on a baseline CodeBlock. One that I found was // that we might decide that the CodeBlock should be jettisoned due to old age, so the // isMarked check doesn't protect us. - if (!JITCode::isOptimizingJIT(jitType())) + if (!JSC::JITCode::isOptimizingJIT(jitType())) return; DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); @@ -1575,9 +1576,11 @@ void CodeBlock::finalizeLLIntInlineCaches() void CodeBlock::finalizeJITInlineCaches() { #if ENABLE(DFG_JIT) - if (JITCode::isOptimizingJIT(jitType())) { + if (JSC::JITCode::isOptimizingJIT(jitType())) { for (auto* callLinkInfo : m_jitCode->dfgCommon()->m_callLinkInfos) callLinkInfo->visitWeak(vm()); + for (auto* callLinkInfo : m_jitCode->dfgCommon()->m_directCallLinkInfos) + callLinkInfo->visitWeak(vm()); if (auto* jitData = dfgJITData()) { for (auto& callLinkInfo : jitData->callLinkInfos()) callLinkInfo.visitWeak(vm()); @@ -1616,7 +1619,7 @@ void CodeBlock::finalizeUnconditionally(VM& vm, CollectionScope) #endif #if ENABLE(DFG_JIT) - if (JITCode::isOptimizingJIT(jitType())) { + if (JSC::JITCode::isOptimizingJIT(jitType())) { DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); dfgCommon->recordedStatuses.finalize(vm); } @@ -1625,10 +1628,10 @@ void CodeBlock::finalizeUnconditionally(VM& vm, CollectionScope) auto updateActivity = [&] { if (!VM::useUnlinkedCodeBlockJettisoning()) return; - JITCode* jitCode = m_jitCode.get(); - double count = 0; + auto* jitCode = m_jitCode.get(); + float count = 0; bool alwaysActive = false; - switch (JITCode::jitTypeFor(jitCode)) { + switch (JSC::JITCode::jitTypeFor(jitCode)) { case JITType::None: case JITType::HostCallThunk: return; @@ -1683,7 +1686,7 @@ void CodeBlock::getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result) result.add(stubInfo.codeOrigin, ICStatus()).iterator->value.stubInfo = &stubInfo; return IterationStatus::Continue; }); - if (JITCode::isOptimizingJIT(jitType())) { + if (JSC::JITCode::isOptimizingJIT(jitType())) { #if ENABLE(DFG_JIT) DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); for (auto* callLinkInfo : dfgCommon->m_callLinkInfos) @@ -1750,7 +1753,7 @@ CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(const ConcurrentJSLocke } #if ENABLE(DFG_JIT) - if (JITCode::isOptimizingJIT(jitType())) { + if (JSC::JITCode::isOptimizingJIT(jitType())) { DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
for (auto* callLinkInfo : dfgCommon->m_callLinkInfos) { if (callLinkInfo->codeOrigin() == CodeOrigin(index)) @@ -1785,7 +1788,7 @@ void CodeBlock::resetBaselineJITData() // these *infos, but when we have an OSR exit linked to this CodeBlock, we won't downgrade // to LLInt. - for (auto& stubInfo : jitData->m_stubInfos) { + for (auto& stubInfo : jitData->stubInfos()) { stubInfo.aboutToDie(); stubInfo.deref(); } @@ -1843,7 +1846,7 @@ void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, stubInfo.visitAggregate(visitor); return IterationStatus::Continue; }); - if (JITCode::isOptimizingJIT(jitType())) { + if (JSC::JITCode::isOptimizingJIT(jitType())) { #if ENABLE(DFG_JIT) DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); dfgCommon->recordedStatuses.visitAggregate(visitor); @@ -1859,7 +1862,7 @@ void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, Visitor& UNUSED_PARAM(visitor); #if ENABLE(DFG_JIT) - if (!JITCode::isOptimizingJIT(jitType())) + if (!JSC::JITCode::isOptimizingJIT(jitType())) return; DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); @@ -1901,7 +1904,7 @@ CodeBlock* CodeBlock::baselineVersion() return this; CodeBlock* result = replacement(); if (!result) { - if (JITCode::isOptimizingJIT(selfJITType)) { + if (JSC::JITCode::isOptimizingJIT(selfJITType)) { // The replacement can be null if we've had a memory clean up and the executable // has been purged of its codeBlocks (see ExecutableBase::clearCode()). Regardless, // the current codeBlock is still live on the stack, and as an optimizing JIT @@ -1965,7 +1968,7 @@ HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler required DisposableCallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite) { #if ENABLE(DFG_JIT) - RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType())); + RELEASE_ASSERT(JSC::JITCode::isOptimizingJIT(jitType())); RELEASE_ASSERT(canGetCodeOrigin(originalCallSite)); ASSERT(!!handlerForIndex(originalCallSite.bits())); CodeOrigin originalOrigin = codeOrigin(originalCallSite); @@ -2044,29 +2047,22 @@ void CodeBlock::removeExceptionHandlerForCallSite(DisposableCallSiteIndex callSi RELEASE_ASSERT_NOT_REACHED(); } -unsigned CodeBlock::lineNumberForBytecodeIndex(BytecodeIndex bytecodeIndex) +LineColumn CodeBlock::lineColumnForBytecodeIndex(BytecodeIndex bytecodeIndex) const { RELEASE_ASSERT(bytecodeIndex.offset() < instructions().size()); - return ownerExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeIndex(bytecodeIndex); + auto lineColumn = m_unlinkedCode->lineColumnForBytecodeIndex(bytecodeIndex); + lineColumn.column += lineColumn.line ? 1 : firstLineColumnOffset(); + lineColumn.line += ownerExecutable()->firstLine(); + return lineColumn; } -unsigned CodeBlock::columnNumberForBytecodeIndex(BytecodeIndex bytecodeIndex) +ExpressionInfo::Entry CodeBlock::expressionInfoForBytecodeIndex(BytecodeIndex bytecodeIndex) const { - int divot; - int startOffset; - int endOffset; - unsigned line; - unsigned column; - expressionRangeForBytecodeIndex(bytecodeIndex, divot, startOffset, endOffset, line, column); - return column; -} - -void CodeBlock::expressionRangeForBytecodeIndex(BytecodeIndex bytecodeIndex, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const -{ - m_unlinkedCode->expressionRangeForBytecodeIndex(bytecodeIndex, divot, startOffset, endOffset, line, column); - divot += sourceOffset(); - column += line ? 
1 : firstLineColumnOffset(); - line += ownerExecutable()->firstLine(); + auto entry = m_unlinkedCode->expressionInfoForBytecodeIndex(bytecodeIndex); + entry.divot += sourceOffset(); + entry.lineColumn.column += entry.lineColumn.line ? 1 : firstLineColumnOffset(); + entry.lineColumn.line += ownerExecutable()->firstLine(); + return entry; } bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, std::optional column) @@ -2074,11 +2070,8 @@ bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, std::optionalis()) { - int unused; - unsigned opDebugLine; - unsigned opDebugColumn; - expressionRangeForBytecodeIndex(it.index(), unused, unused, unused, opDebugLine, opDebugColumn); - if (line == opDebugLine && (!column || column == opDebugColumn)) + auto lineColumn = lineColumnForBytecodeIndex(it.index()); + if (line == lineColumn.line && (!column || column == lineColumn.column)) return true; } } @@ -2097,28 +2090,23 @@ void CodeBlock::shrinkToFit(const ConcurrentJSLocker&, ShrinkMode shrinkMode) #endif } -#if ENABLE(JIT) -void CodeBlock::linkIncomingPolymorphicCall(CallFrame* callerFrame, PolymorphicCallNode* incoming) +void CodeBlock::linkIncomingCall(JSCell* caller, CallLinkInfoBase* incoming) { - noticeIncomingCall(callerFrame); - m_incomingPolymorphicCalls.push(incoming); -} -#endif // ENABLE(JIT) - -void CodeBlock::linkIncomingCall(CallFrame* callerFrame, CallLinkInfo* incoming) -{ - noticeIncomingCall(callerFrame); + if (caller) + noticeIncomingCall(caller); m_incomingCalls.push(incoming); } -void CodeBlock::unlinkIncomingCalls() +void CodeBlock::unlinkOrUpgradeIncomingCalls(VM& vm, CodeBlock* newCodeBlock) { - while (!m_incomingCalls.isEmpty()) - m_incomingCalls.begin()->unlink(vm()); -#if ENABLE(JIT) - while (!m_incomingPolymorphicCalls.isEmpty()) - m_incomingPolymorphicCalls.begin()->unlink(vm()); -#endif + SentinelLinkedList> toBeRemoved; + toBeRemoved.takeFrom(m_incomingCalls); + + // Note that upgrade may relink CallLinkInfo into newCodeBlock, and it is possible that |this| and newCodeBlock are the same. + // This happens when newCodeBlock is installed by upgrading LLInt to Baseline. In that case, |this|'s m_incomingCalls will + // be accumulated correctly. + while (!toBeRemoved.isEmpty()) + toBeRemoved.begin()->unlinkOrUpgrade(vm, this, newCodeBlock); } CodeBlock* CodeBlock::newReplacement() @@ -2229,7 +2217,7 @@ void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mod // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock. #if ENABLE(DFG_JIT) - if (JITCode::isOptimizingJIT(jitType())) + if (JSC::JITCode::isOptimizingJIT(jitType())) jitCode()->dfgCommon()->clearWatchpoints(); if (reason != Profiler::JettisonDueToOldAge) { @@ -2299,6 +2287,12 @@ JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin) auto* inlineCallFrame = codeOrigin.inlineCallFrame(); if (!inlineCallFrame) return globalObject(); + // It is possible that the global object and/or other data relating to this origin + // was collected by GC, but we are still asking for this (ex: in a patchpoint generate() function). + // Plan::cancel should have cleared this in that case. + // Let's make sure we can continue to execute safely, even though we don't have a global object to give. 
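One detail of the unlinkOrUpgradeIncomingCalls() hunk above deserves a note: the list is first moved wholesale into a local toBeRemoved and only then drained, because unlinkOrUpgrade() may relink an entry back onto m_incomingCalls (the comment's upgrade case, where |this| and newCodeBlock coincide). A minimal sketch of that drain-then-process shape, using std::list as a stand-in for WTF's SentinelLinkedList:

#include <list>

struct IncomingCall { bool upgraded = false; };

struct Block {
    std::list<IncomingCall*> incoming;

    void unlinkOrUpgrade(IncomingCall* call)
    {
        if (call->upgraded)
            incoming.push_back(call); // re-adding is safe; see below
    }

    void unlinkOrUpgradeIncomingCalls()
    {
        std::list<IncomingCall*> toBeRemoved;
        toBeRemoved.splice(toBeRemoved.begin(), incoming); // takeFrom() analogue
        while (!toBeRemoved.empty()) {
            IncomingCall* call = toBeRemoved.front();
            toBeRemoved.pop_front();
            // Only the already-detached local list is iterated, so entries
            // pushed back onto `incoming` accumulate without invalidating
            // this loop.
            unlinkOrUpgrade(call);
        }
    }
};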
+ if (!inlineCallFrame->baselineCodeBlock) + return nullptr; return inlineCallFrame->baselineCodeBlock->globalObject(); } @@ -2342,12 +2336,11 @@ class RecursionCheckFunctor { mutable bool m_didRecurse; }; -void CodeBlock::noticeIncomingCall(CallFrame* callerFrame) +void CodeBlock::noticeIncomingCall(JSCell* caller) { RELEASE_ASSERT(!m_isJettisoned); - auto* owner = callerFrame->codeOwnerCell(); - CodeBlock* callerCodeBlock = jsDynamicCast(owner); + CodeBlock* callerCodeBlock = jsDynamicCast(caller); dataLogLnIf(Options::verboseCallLink(), "Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this); @@ -2386,7 +2379,7 @@ void CodeBlock::noticeIncomingCall(CallFrame* callerFrame) return; } - if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) { + if (JSC::JITCode::isOptimizingJIT(callerCodeBlock->jitType())) { m_shouldAlwaysBeInlined = false; dataLogLnIf(Options::verboseCallLink(), " Clearing SABI because caller was already optimized."); return; } @@ -2402,18 +2395,8 @@ void CodeBlock::noticeIncomingCall(CallFrame* callerFrame) } // Recursive calls won't be inlined. - VM& vm = this->vm(); - RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth()); - StackVisitor::visit(vm.topCallFrame, vm, functor); - - if (functor.didRecurse()) { - dataLogLnIf(Options::verboseCallLink(), " Clearing SABI because recursion was detected."); - m_shouldAlwaysBeInlined = false; - return; - } - if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) { - dataLog("In call from ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to ", *this, ": caller's DFG capability level is not set.\n"); + dataLog("In call from ", FullCodeOrigin(callerCodeBlock, CodeOrigin { }), " to ", *this, ": caller's DFG capability level is not set.\n"); CRASH(); } @@ -2466,7 +2449,7 @@ unsigned CodeBlock::numberOfDFGCompiles() return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter; } CodeBlock* replacement = this->replacement(); - return ((replacement && JITCode::isOptimizingJIT(replacement->jitType())) ? 1 : 0) + m_reoptimizationRetryCounter; + return ((replacement && JSC::JITCode::isOptimizingJIT(replacement->jitType())) ? 1 : 0) + m_reoptimizationRetryCounter; } int32_t CodeBlock::codeTypeThresholdMultiplier() const @@ -2655,7 +2638,7 @@ void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResu switch (result) { case CompilationSuccessful: - RELEASE_ASSERT(replacement && JITCode::isOptimizingJIT(replacement->jitType())); + RELEASE_ASSERT(replacement && JSC::JITCode::isOptimizingJIT(replacement->jitType())); optimizeNextInvocation(); return; case CompilationFailed: @@ -2684,7 +2667,7 @@ void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResu uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold) { - ASSERT(JITCode::isOptimizingJIT(jitType())); + ASSERT(JSC::JITCode::isOptimizingJIT(jitType())); // Compute this the lame way so we don't saturate. This is called infrequently // enough that this loop won't hurt us.
unsigned result = desiredThreshold; @@ -2756,7 +2739,7 @@ DFG::CodeOriginPool& CodeBlock::codeOrigins() size_t CodeBlock::numberOfDFGIdentifiers() const { - if (!JITCode::isOptimizingJIT(jitType())) + if (!JSC::JITCode::isOptimizingJIT(jitType())) return 0; return m_jitCode->dfgCommon()->m_dfgIdentifiers.size(); @@ -2768,7 +2751,7 @@ const Identifier& CodeBlock::identifier(int index) const size_t unlinkedIdentifiers = unlinkedCode->numberOfIdentifiers(); if (static_cast(index) < unlinkedIdentifiers) return unlinkedCode->identifier(index); - ASSERT(JITCode::isOptimizingJIT(jitType())); + ASSERT(JSC::JITCode::isOptimizingJIT(jitType())); return m_jitCode->dfgCommon()->m_dfgIdentifiers[index - unlinkedIdentifiers]; } #endif // ENABLE(DFG_JIT) @@ -2797,7 +2780,7 @@ bool CodeBlock::hasIdentifier(UniquedStringImpl* uid) } #if ENABLE(DFG_JIT) if (numberOfDFGIdentifiers) { - ASSERT(JITCode::isOptimizingJIT(jitType())); + ASSERT(JSC::JITCode::isOptimizingJIT(jitType())); auto& dfgIdentifiers = m_jitCode->dfgCommon()->m_dfgIdentifiers; for (unsigned index = 0; index < numberOfDFGIdentifiers; ++index) { const Identifier& identifier = dfgIdentifiers[index]; @@ -2817,7 +2800,7 @@ bool CodeBlock::hasIdentifier(UniquedStringImpl* uid) return true; } #if ENABLE(DFG_JIT) - ASSERT(JITCode::isOptimizingJIT(jitType())); + ASSERT(JSC::JITCode::isOptimizingJIT(jitType())); auto& dfgIdentifiers = m_jitCode->dfgCommon()->m_dfgIdentifiers; for (unsigned index = 0; index < numberOfDFGIdentifiers; ++index) { const Identifier& identifier = dfgIdentifiers[index]; @@ -2836,6 +2819,7 @@ void CodeBlock::updateAllNonLazyValueProfilePredictionsAndCountLiveness(const Co unsigned index = 0; UnlinkedCodeBlock* unlinkedCodeBlock = this->unlinkedCodeBlock(); + bool isBuiltinFunction = unlinkedCodeBlock->isBuiltinFunction(); forEachValueProfile([&](auto& profile, bool isArgument) { unsigned numSamples = profile.totalNumberOfSamples(); using Profile = std::remove_reference_t; @@ -2845,13 +2829,17 @@ void CodeBlock::updateAllNonLazyValueProfilePredictionsAndCountLiveness(const Co numberOfSamplesInProfiles += numSamples; if (isArgument) { profile.computeUpdatedPrediction(locker); - unlinkedCodeBlock->unlinkedValueProfile(index++).update(profile); + if (!isBuiltinFunction) + unlinkedCodeBlock->unlinkedValueProfile(index).update(profile); + ++index; return; } if (profile.numberOfSamples() || profile.isSampledBefore()) numberOfLiveNonArgumentValueProfiles++; profile.computeUpdatedPrediction(locker); - unlinkedCodeBlock->unlinkedValueProfile(index++).update(profile); + if (!isBuiltinFunction) + unlinkedCodeBlock->unlinkedValueProfile(index).update(profile); + ++index; }); if (m_metadata) { @@ -2890,10 +2878,13 @@ void CodeBlock::updateAllArrayProfilePredictions() return; unsigned index = 0; - + UnlinkedCodeBlock* unlinkedCodeBlock = this->unlinkedCodeBlock(); + bool isBuiltinFunction = unlinkedCodeBlock->isBuiltinFunction(); auto process = [&] (ArrayProfile& profile) { profile.computeUpdatedPrediction(this); - unlinkedCodeBlock()->unlinkedArrayProfile(index++).update(profile); + if (!isBuiltinFunction) + unlinkedCodeBlock->unlinkedArrayProfile(index).update(profile); + ++index; }; m_metadata->forEach([&] (auto& metadata) { @@ -2983,14 +2974,14 @@ bool CodeBlock::shouldOptimizeNowFromBaseline() #if ENABLE(DFG_JIT) void CodeBlock::tallyFrequentExitSites() { - ASSERT(JITCode::isOptimizingJIT(jitType())); + ASSERT(JSC::JITCode::isOptimizingJIT(jitType())); ASSERT(JITCode::isBaselineCode(alternative()->jitType())); CodeBlock* profiledBlock 
= alternative(); switch (jitType()) { case JITType::DFGJIT: { - DFG::JITCode* jitCode = m_jitCode->dfg(); + auto* jitCode = m_jitCode->dfg(); for (auto& exit : jitCode->m_osrExit) exit.considerAddingAsFrequentExitSite(profiledBlock); break; @@ -3001,7 +2992,7 @@ void CodeBlock::tallyFrequentExitSites() // There is no easy way to avoid duplicating this code since the FTL::JITCode::m_osrExit // vector contains a totally different type, that just so happens to behave like // DFG::JITCode::m_osrExit. - FTL::JITCode* jitCode = m_jitCode->ftl(); + auto* jitCode = m_jitCode->ftl(); for (auto& exit : jitCode->m_osrExit) exit.considerAddingAsFrequentExitSite(profiledBlock); break; @@ -3256,14 +3247,14 @@ void CodeBlock::addBreakpoint(unsigned numBreakpoints) { m_numBreakpoints += numBreakpoints; ASSERT(m_numBreakpoints); - if (JITCode::isOptimizingJIT(jitType())) + if (JSC::JITCode::isOptimizingJIT(jitType())) jettison(Profiler::JettisonDueToDebuggerBreakpoint); } void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode) { m_steppingMode = mode; - if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType())) + if (mode == SteppingModeEnabled && JSC::JITCode::isOptimizingJIT(jitType())) jettison(Profiler::JettisonDueToDebuggerStepping); } @@ -3489,6 +3480,14 @@ bool CodeBlock::useDataIC() const return true; } +CodePtr CodeBlock::addressForCallConcurrently(ArityCheckMode arityCheck) const +{ + ConcurrentJSLocker locker(m_lock); + if (!m_jitCode) + return nullptr; + return m_jitCode->addressForCall(arityCheck); +} + bool CodeBlock::hasInstalledVMTrapsBreakpoints() const { #if ENABLE(SIGNAL_BASED_VM_TRAPS) @@ -3509,7 +3508,7 @@ bool CodeBlock::canInstallVMTrapBreakpoints() const // This function may be called from a signal handler. We need to be // careful to not call anything that is not signal handler safe, e.g. // we should not perturb the refCount of m_jitCode. - if (!JITCode::isOptimizingJIT(jitType())) + if (!JSC::JITCode::isOptimizingJIT(jitType())) return false; if (m_jitCode->isUnlinked()) return false; diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/CodeBlock.h b/vendor/webkit/Source/JavaScriptCore/bytecode/CodeBlock.h index 0b91e33e..1b31ac79 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/CodeBlock.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/CodeBlock.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008-2023 Apple Inc. All rights reserved. + * Copyright (C) 2008-2024 Apple Inc. All rights reserved. 
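addressForCallConcurrently(), added just above, exists so a concurrent compiler thread can sample the entry point under the CodeBlock's lock: it sees either nullptr (no JIT code installed yet) or a fully published pointer, never a torn update. A simplified analogue of the pattern, with std::mutex standing in for ConcurrentJSLocker and invented types throughout:

#include <memory>
#include <mutex>

struct JITArtifact {
    const void* entryPoint;
};

class Block {
public:
    void install(std::shared_ptr<JITArtifact> jit)
    {
        std::lock_guard<std::mutex> locker(m_lock);
        m_jit = std::move(jit); // publish under the lock
    }

    const void* addressForCallConcurrently() const
    {
        std::lock_guard<std::mutex> locker(m_lock);
        return m_jit ? m_jit->entryPoint : nullptr; // nullptr: nothing installed yet
    }

private:
    mutable std::mutex m_lock;
    std::shared_ptr<JITArtifact> m_jit;
};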
* Copyright (C) 2008 Cameron Zwarich * * Redistribution and use in source and binary forms, with or without @@ -42,7 +42,7 @@ #include "DirectEvalCodeCache.h" #include "EvalExecutable.h" #include "ExecutionCounter.h" -#include "ExpressionRangeInfo.h" +#include "ExpressionInfo.h" #include "FunctionExecutable.h" #include "HandlerInfo.h" #include "ICStatusMap.h" @@ -152,6 +152,8 @@ class CodeBlock : public JSCell { void dumpAssumingJITType(PrintStream&, JITType) const; JS_EXPORT_PRIVATE void dump(PrintStream&) const; + void dumpSimpleName(PrintStream&) const; + MetadataTable* metadataTable() const { return m_metadata.get(); } unsigned numParameters() const { return m_numParameters; } @@ -241,10 +243,9 @@ class CodeBlock : public JSCell { HandlerInfo* handlerForBytecodeIndex(BytecodeIndex, RequiredHandler = RequiredHandler::AnyHandler); HandlerInfo* handlerForIndex(unsigned, RequiredHandler = RequiredHandler::AnyHandler); void removeExceptionHandlerForCallSite(DisposableCallSiteIndex); - unsigned lineNumberForBytecodeIndex(BytecodeIndex); - unsigned columnNumberForBytecodeIndex(BytecodeIndex); - void expressionRangeForBytecodeIndex(BytecodeIndex, int& divot, - int& startOffset, int& endOffset, unsigned& line, unsigned& column) const; + + LineColumn lineColumnForBytecodeIndex(BytecodeIndex) const; + ExpressionInfo::Entry expressionInfoForBytecodeIndex(BytecodeIndex) const; std::optional bytecodeIndexFromCallSiteIndex(CallSiteIndex); @@ -281,11 +282,8 @@ class CodeBlock : public JSCell { void resetBaselineJITData(); #endif // ENABLE(JIT) - void unlinkIncomingCalls(); - void linkIncomingCall(CallFrame* callerFrame, CallLinkInfo*); -#if ENABLE(JIT) - void linkIncomingPolymorphicCall(CallFrame* callerFrame, PolymorphicCallNode*); -#endif // ENABLE(JIT) + void unlinkOrUpgradeIncomingCalls(VM&, CodeBlock*); + void linkIncomingCall(JSCell* caller, CallLinkInfoBase*); const JSInstruction* outOfLineJumpTarget(const JSInstruction* pc); int outOfLineJumpOffset(JSInstructionStream::Offset offset) @@ -322,22 +320,22 @@ class CodeBlock : public JSCell { // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind()) CodeBlock* newReplacement(); - void setJITCode(Ref&& code) + void setJITCode(Ref&& code) { if (!code->isShared()) - heap()->reportExtraMemoryAllocated(code->size()); + heap()->reportExtraMemoryAllocated(this, code->size()); ConcurrentJSLocker locker(m_lock); WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid. m_jitCode = WTFMove(code); } - RefPtr jitCode() { return m_jitCode; } + RefPtr jitCode() { return m_jitCode; } static ptrdiff_t jitCodeOffset() { return OBJECT_OFFSETOF(CodeBlock, m_jitCode); } JITType jitType() const { - JITCode* jitCode = m_jitCode.get(); - JITType result = JITCode::jitTypeFor(jitCode); + auto* jitCode = m_jitCode.get(); + JITType result = JSC::JITCode::jitTypeFor(jitCode); return result; } @@ -348,6 +346,8 @@ class CodeBlock : public JSCell { bool useDataIC() const; + CodePtr addressForCallConcurrently(ArityCheckMode) const; + #if ENABLE(JIT) CodeBlock* replacement(); @@ -443,7 +443,7 @@ class CodeBlock : public JSCell { // Having code origins implies that there has been some inlining. 
bool hasCodeOrigins() { - return JITCode::isOptimizingJIT(jitType()); + return JSC::JITCode::isOptimizingJIT(jitType()); } bool canGetCodeOrigin(CallSiteIndex index) @@ -507,7 +507,7 @@ class CodeBlock : public JSCell { const BitVector& bitVector(size_t i) { return m_unlinkedCode->bitVector(i); } - Heap* heap() const { return &m_vm->heap; } + JSC::Heap* heap() const { return &m_vm->heap; } JSGlobalObject* globalObject() { return m_globalObject.get(); } static ptrdiff_t offsetOfGlobalObject() { return OBJECT_OFFSETOF(CodeBlock, m_globalObject); } @@ -534,7 +534,7 @@ class CodeBlock : public JSCell { } BaselineJITData* baselineJITData() { - if (!JITCode::isOptimizingJIT(jitType())) + if (!JSC::JITCode::isOptimizingJIT(jitType())) return bitwise_cast(m_jitData); return nullptr; } @@ -549,7 +549,7 @@ class CodeBlock : public JSCell { DFG::JITData* dfgJITData() { - if (JITCode::isOptimizingJIT(jitType())) + if (JSC::JITCode::isOptimizingJIT(jitType())) return bitwise_cast(m_jitData); return nullptr; } @@ -883,7 +883,7 @@ class CodeBlock : public JSCell { CodeBlock* specialOSREntryBlockOrNull(); - void noticeIncomingCall(CallFrame* callerFrame); + void noticeIncomingCall(JSCell* caller); void updateAllNonLazyValueProfilePredictionsAndCountLiveness(const ConcurrentJSLocker&, unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles); @@ -955,12 +955,11 @@ class CodeBlock : public JSCell { VM* const m_vm; const void* const m_instructionsRawPointer { nullptr }; - SentinelLinkedList> m_incomingCalls; -#if ENABLE(JIT) - SentinelLinkedList> m_incomingPolymorphicCalls; -#endif + SentinelLinkedList> m_incomingCalls; + uint16_t m_optimizationDelayCounter { 0 }; + uint16_t m_reoptimizationRetryCounter { 0 }; StructureWatchpointMap m_llintGetByIdWatchpointMap; - RefPtr m_jitCode; + RefPtr m_jitCode; #if ENABLE(JIT) public: void* m_jitData { nullptr }; @@ -986,11 +985,9 @@ class CodeBlock : public JSCell { BaselineExecutionCounter m_jitExecuteCounter; - uint16_t m_optimizationDelayCounter { 0 }; - uint16_t m_reoptimizationRetryCounter { 0 }; + float m_previousCounter { 0 }; ApproximateTime m_creationTime; - double m_previousCounter { 0 }; std::unique_ptr m_rareData; @@ -1001,7 +998,7 @@ class CodeBlock : public JSCell { }; /* This check is for normal Release builds; ASSERT_ENABLED changes the size. */ #if defined(NDEBUG) && !defined(ASSERT_ENABLED) && COMPILER(GCC_COMPATIBLE) -static_assert(sizeof(CodeBlock) <= 240, "Keep it small for memory saving"); +static_assert(sizeof(CodeBlock) <= 224, "Keep it small for memory saving"); #endif template diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/CodeBlockHash.h b/vendor/webkit/Source/JavaScriptCore/bytecode/CodeBlockHash.h index 50844d4a..a181a481 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/CodeBlockHash.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/CodeBlockHash.h @@ -64,7 +64,7 @@ class CodeBlockHash { void dump(PrintStream&) const; // Comparison methods useful for bisection. 
- friend bool operator==(CodeBlockHash, CodeBlockHash) = default; + friend bool operator==(const CodeBlockHash&, const CodeBlockHash&) = default; bool operator<(const CodeBlockHash& other) const { return hash() < other.hash(); } bool operator>(const CodeBlockHash& other) const { return hash() > other.hash(); } bool operator<=(const CodeBlockHash& other) const { return hash() <= other.hash(); } diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp index d4104c2c..623734b6 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp @@ -33,10 +33,16 @@ namespace JSC { void CodeBlockJettisoningWatchpoint::fireInternal(VM&, const FireDetail& detail) { + // If CodeBlock is no longer live, we do not fire it. + // This works since CodeBlock is the owner of this watchpoint. When it gets destroyed, then this watchpoint also gets destroyed. + // Only problematic case is, (1) CodeBlock is dead, but (2) destructor is not called yet. In this case, isLive() check guards correctly. + if (!m_owner->isLive()) + return; + if (DFG::shouldDumpDisassembly()) - dataLog("Firing watchpoint ", RawPointer(this), " on ", *m_codeBlock, "\n"); + dataLog("Firing watchpoint ", RawPointer(this), " on ", *m_owner, "\n"); - m_codeBlock->jettison(Profiler::JettisonDueToUnprofiledWatchpoint, CountReoptimization, &detail); + m_owner->jettison(Profiler::JettisonDueToUnprofiledWatchpoint, CountReoptimization, &detail); } } // namespace JSC diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h b/vendor/webkit/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h index 5b6628e5..d96bc119 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h @@ -35,19 +35,19 @@ class CodeBlockJettisoningWatchpoint final : public Watchpoint { public: CodeBlockJettisoningWatchpoint(CodeBlock* codeBlock = nullptr) : Watchpoint(Watchpoint::Type::CodeBlockJettisoning) - , m_codeBlock(codeBlock) + , m_owner(codeBlock) { } void initialize(CodeBlock* codeBlock) { - m_codeBlock = codeBlock; + m_owner = codeBlock; } void fireInternal(VM&, const FireDetail&); private: - PackedCellPtr m_codeBlock; + PackedCellPtr m_owner; }; } // namespace JSC diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/CodeOrigin.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/CodeOrigin.cpp index 3b8f3fa7..f0e023e3 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/CodeOrigin.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/CodeOrigin.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2018 Apple Inc. All rights reserved. + * Copyright (C) 2012-2023 Apple Inc. All rights reserved. 
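The CodeBlockJettisoningWatchpoint hunk above (with the m_codeBlock to m_owner rename) leans on an ownership invariant: the watchpoint cannot outlive its owner, but it can fire while the owner is dead and merely not destructed yet, so firing must check liveness first. A toy model of that guard, with invented stand-in types:

struct Cell {
    bool live = true;
    bool isLive() const { return live; }
};

struct JettisonWatchpoint {
    Cell* owner = nullptr; // owner also owns this watchpoint, so the pointer stays valid

    void fire()
    {
        // Dead-but-not-yet-destructed owner: firing must be a no-op.
        if (!owner->isLive())
            return;
        jettison();
    }

    void jettison()
    {
        // Would discard the owner's optimized code here.
    }
};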
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,9 +28,12 @@ #include "CodeBlock.h" #include "InlineCallFrame.h" +#include namespace JSC { +WTF_MAKE_TZONE_ALLOCATED_IMPL_NESTED(CodeOriginOutOfLineCodeOrigin, CodeOrigin::OutOfLineCodeOrigin); + unsigned CodeOrigin::inlineDepth() const { unsigned result = 1; diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/CodeOrigin.h b/vendor/webkit/Source/JavaScriptCore/bytecode/CodeOrigin.h index fd8831fb..d941dfb8 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/CodeOrigin.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/CodeOrigin.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011-2019 Apple Inc. All rights reserved. + * Copyright (C) 2011-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -32,6 +32,7 @@ #include #include #include +#include #include #if OS(DARWIN) @@ -204,7 +205,7 @@ class CodeOrigin { static constexpr uintptr_t s_maskIsBytecodeIndexInvalid = 2; struct OutOfLineCodeOrigin { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(OutOfLineCodeOrigin); public: InlineCallFrame* inlineCallFrame; BytecodeIndex bytecodeIndex; diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/DeferredSourceDump.h b/vendor/webkit/Source/JavaScriptCore/bytecode/DeferredSourceDump.h index e8d2ca8d..eccbb929 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/DeferredSourceDump.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/DeferredSourceDump.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2018 Apple Inc. All rights reserved. + * Copyright (C) 2015-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -27,13 +27,14 @@ #include "JITCode.h" #include "Strong.h" +#include namespace JSC { class CodeBlock; class DeferredSourceDump { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(DeferredSourceDump); public: DeferredSourceDump(CodeBlock*); DeferredSourceDump(CodeBlock*, CodeBlock* rootCodeBlock, JITType rootJITType, BytecodeIndex callerBytecodeIndex); diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/DeleteByStatus.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/DeleteByStatus.cpp index 079ac6b2..7b7cb823 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/DeleteByStatus.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/DeleteByStatus.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2021 Apple Inc. All rights reserved. + * Copyright (C) 2020-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -32,9 +32,12 @@ #include "InlineCacheCompiler.h" #include "StructureStubInfo.h" #include +#include namespace JSC { +WTF_MAKE_TZONE_ALLOCATED_IMPL(DeleteByStatus); + bool DeleteByStatus::appendVariant(const DeleteByVariant& variant) { return appendICStatusVariant(m_variants, variant); diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/DeleteByStatus.h b/vendor/webkit/Source/JavaScriptCore/bytecode/DeleteByStatus.h index 388adbd2..2b4eb5c3 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/DeleteByStatus.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/DeleteByStatus.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2021 Apple Inc. 
All rights reserved. + * Copyright (C) 2020-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -33,6 +33,7 @@ #include "ExitFlag.h" #include "ICStatusMap.h" #include "StubInfoSummary.h" +#include namespace JSC { @@ -41,7 +42,7 @@ class CodeBlock; class StructureStubInfo; class DeleteByStatus final { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(DeleteByStatus); public: enum State : uint8_t { // It's uncached so we have no information. diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/DeleteByVariant.h b/vendor/webkit/Source/JavaScriptCore/bytecode/DeleteByVariant.h index e2364922..3c8b8285 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/DeleteByVariant.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/DeleteByVariant.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2021 Apple Inc. All rights reserved. + * Copyright (C) 2020-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,6 +31,7 @@ #include "PropertyOffset.h" #include "StructureSet.h" #include +#include namespace JSC { @@ -39,7 +40,7 @@ class DeleteByStatus; struct DumpContext; class DeleteByVariant { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(DeleteByVariant); public: DeleteByVariant( CacheableIdentifier, bool result, diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp index 03d0b29c..f72f5cb8 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2014, 2016 Apple Inc. All rights reserved. + * Copyright (C) 2012-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,6 +29,7 @@ #include "CodeBlock.h" #include "ExecutableAllocator.h" #include "VMInlines.h" +#include namespace JSC { @@ -212,5 +213,8 @@ void ExecutionCounter::dump(PrintStream& out) const template class ExecutionCounter; template class ExecutionCounter; +WTF_MAKE_TZONE_ALLOCATED_IMPL_TEMPLATE(BaselineExecutionCounter); +WTF_MAKE_TZONE_ALLOCATED_IMPL_TEMPLATE(UpperTierExecutionCounter); + } // namespace JSC diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/ExecutionCounter.h b/vendor/webkit/Source/JavaScriptCore/bytecode/ExecutionCounter.h index caadb104..b79ae3ac 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/ExecutionCounter.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/ExecutionCounter.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2017 Apple Inc. All rights reserved. + * Copyright (C) 2012-2023 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,6 +28,7 @@ #include "Options.h" #include #include +#include namespace JSC { @@ -54,7 +55,7 @@ inline int32_t formattedTotalExecutionCount(float value) template class ExecutionCounter { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(ExecutionCounter); WTF_MAKE_NONMOVABLE(ExecutionCounter); public: ExecutionCounter(); diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/ExpressionInfo.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/ExpressionInfo.cpp new file mode 100644 index 00000000..83729488 --- /dev/null +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/ExpressionInfo.cpp @@ -0,0 +1,1081 @@ +/* + * Copyright (C) 2024 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "ExpressionInfo.h" + +#include "VM.h" +#include +#include +#include + +namespace JSC { + +/* + Since ExpressionInfo is only used to get source position info (e.g. line and column) + for error messages / stacks and debugging, it need not be fast. However, empirical data + shows that there is a lot of it on websites that are especially memory hungry. So, it + behooves us to reduce this memory burden. + + ExpressionInfo is a data structure that contains: + a. EncodedInfo entries. + This is where the expression info data is really stored. + + b. Chapter marks in the list of EncodedInfo entries. + This is just an optimization aid to speed up reconstruction of expression info + from the EncodedInfo. + + c. A LineColumnMap cache. + This is to speed up look up of LineColumn values we have looked up before. + + Encoding of EncodedInfo words + ============================= + + Extension: [ 11111b | 1b | offset:26 ] + AbsInstPC: [ 11111b | 0b | value:26 ] + MultiWide: [ 11110b | 1b | 111b | numFields:5 | fields[6]:18 ] [ value:32 ] ... + DuoWide: [ 11110b | 1b | field2:3 | value2:10 | field1:3 | value1:10 ] + SingleWide: [ 11110b | 0b | field:3 | value:23 ] + ExtensionEnd: [ 11110b | 0b | 111b | 0:23 ] + Basic: [ instPC:5 | divot:7 | start:6 | end:6 | line:3 | column:5 ] + + Details of what these encodings are used for follows. 
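Given the Basic layout just shown (instPC:5 | divot:7 | start:6 | end:6 | line:3 | column:5, totalling 32 bits), unpacking is plain shift-and-mask. A sketch of the field extraction, assuming the fields are packed from the most significant bit downward (the real code spells these as named *Shift/*Bits constants rather than literals):

#include <cstdint>

struct BasicWord {
    uint32_t word;
    unsigned instPC() const { return word >> 27; }           // top 5 bits
    unsigned divot() const  { return (word >> 20) & 0x7f; }  // next 7 (biased; see below)
    unsigned start() const  { return (word >> 14) & 0x3f; }  // next 6
    unsigned end() const    { return (word >> 8) & 0x3f; }   // next 6
    unsigned line() const   { return (word >> 5) & 0x07; }   // next 3 (biased)
    unsigned column() const { return word & 0x1f; }          // low 5 (biased)
};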
+ + EncodedInfo + =========== + Abstractly, each expression info defines 6 fields of unsigned type: + 1. InstPC: bytecodeIndex offset. + 2. Divot: the execution point in an expression. + 3. Start: the offset from the Divot to the start of the expression. + 4. End: the offset from the Divot to the end of the expression. + 5. Line: the line number at the Divot. + 6. Column: the column number at the Divot. + + Let's call this an expression info entry, represented by ExpressionInfo::Entry. + + However, we know that the delta values of these fields between consecutive entries tend to be + small. So, instead of encoding an Entry with 24 bytes, we aim to encode the info in just 4 bytes + in an EncodedInfo word. See the encoding of the Basic word above. + + UnlinkedCodeBlockGenerator::addExpressionInfo() triggers the addition of EncodedInfo. + Each call of addExpressionInfo() may add 1 or more EncodedInfo. It can be more than 1 because + the delta of some of the fields may exceed the capacity afforded them in the Basic word. + + To compensate for this, we introduce Wide Encodings of the EncodedInfo word. + + Wide Encodings + ============== + Wide Encodings specify additional delta to add to each field value. Wide Encodings must + eventually be followed by a Basic word. The Basic word acts as the terminator for each + expression info entry. + + The 3 types of Wide Encodings are: + + 1. SingleWide: encodes a single field value with size singleValueBits (currently 23) bits. + 2. DuoWide: encodes 2 field values with size duoValueBits (currently 10) bits each. + 3. MultiWide: encodes up to 6 field values (because there are only 6 fields in an expression + info record). The 1st word will be a header specifying the FieldIDs. Subsequent words are + full 32-bit values for those respective fields. + + Extension Encodings + =================== + Some additional consideration: after UnlinkedCodeBlockGenerator::addExpressionInfo() is done, + UnlinkedCodeBlockGenerator::applyModification() may insert, remove, or add new bytecode due to + generatorification. Hence, the InstPC in the EncodedInfo defining each Entry needs to be adjusted + accordingly. Similarly, we also need to adjust the Chapter marks in the EncodedInfo. + + Given that the EncodedInfo is a compressed form of the Entry fields, there may or may not be + sufficient bits to encode the new InstPC. The straightforward implementation would be to + insert some Wide Encodings to add additional delta for the InstPC. However, this will result + in a lot of shifting of the EncodedInfo vector. + + For simplicity and performance (given these adjustments are rare), rather than actually + inserting additional EncodedInfo, we do an in-place replacement of the first EncodedInfo for the + Entry with an Extension (see encodings above) with an offset to an extension section in the + EncodedInfo vector (past the end of the normal non-Extension info). This effectively behaves + like a call to an extension routine to apply some additional EncodedInfo to adjust the InstPC + as needed. + + The original first EncodedInfo for the Entry will be copied over as the first EncodedInfo in + the extension section. The only exception to this is when the first EncodedInfo is a Basic + word. By definition, the Basic word is the terminator i.e. it must be the last EncodedInfo in + the sequence for each Entry.
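The decode control flow implied by the last two sections is: Wide words accumulate extra deltas, and the Basic word both contributes its own deltas and terminates the Entry. A toy decoder reduced to exactly that shape (the word kinds and fields here are invented for illustration, not JSC's real bit formats):

#include <cstddef>
#include <vector>

enum class Kind { SingleWide, Basic };

struct Word {
    Kind kind;
    unsigned divotDelta; // pretend each word only carries a divot delta
};

// Accumulates one Entry's divot delta; `cursor` advances past the Entry.
unsigned decodeOneDivotDelta(const std::vector<Word>& stream, size_t& cursor)
{
    unsigned delta = 0;
    for (;;) {
        const Word& w = stream[cursor++];
        delta += w.divotDelta;
        if (w.kind == Kind::Basic) // Basic terminates the Entry
            return delta;
    }
}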
+ + To terminate the extension island, we introduce an ExtensionEnd word that tells the decoder + to resume decoding in the original EncodedInfo stream (effectively a return operation). + This is needed if the replaced word is not a Basic word (which again is the terminator). + If the replaced word is the Basic word, then we don't need to append an ExtensionEnd at the + end of the extension: the Basic word itself executes the "return" operation. + + Miscellaneous details: + 1. Currently, the code expects that Extensions cannot be nested. There is no need to nest, + and there are ASSERTs in the decoder to check for this. However, if we find a need in the + future, this can certainly be changed. + + 2. The in-place replacement works on the assumption that we can move a "complete" EncodedInfo + over to the extension island. For all encodings, this is just a single word. The only + exception is the MultiWide encoding, which is followed by N value words. Because the + decoder expects these words to be contiguous, we move all the value words over too. The + slots of the original value words will now be replaced by no-ops (SingleWide with 0). + + AbsInstPC and Chapters + ====================== + One downside of this compressed encoding scheme is that every time we need the Entry value + for a given InstPC, we need to decode from the start of the EncodedInfo vector to compute the + cumulative value of all the deltas up to the Entry of interest. This can be slow if the + EncodedInfo vector is large. + + To mitigate this, we break the EncodedInfo regions into Chapters every + numberOfWordsBetweenChapters (currently 10000) words or so. A Chapter is always started + with an AbsInstPC (Absolute Instruction PC) word. Unlike other EncodedInfo words, which define + delta values for fields to add, AbsInstPC defines an absolute value (currently 26 bits) for + the InstPC to mark the start of the Chapter. If 26 bits isn't enough, we can always add + SingleWide or MultiWide words to make up the difference of the InstPC for this Entry. + + Apart from resetting the cumulative InstPC to this new value, AbsInstPC also implicitly + clears to 0 the cumulative delta for all the other fields. As a result, we can always jump + to the start of a Chapter and start decoding from there (instead of starting from the start + of the EncodedInfo vector). It limits the number of words that we need to decode to around + numberOfWordsBetweenChapters. + + Now, all we need to know is which Chapter a given InstPC falls into. + + Finding Chapters + ================ + For this, we have a vector of Chapter info, where each Chapter defines its starting InstPC + and the EncodedInfo word index for the start of the first encoded Entry in that Chapter. + + The first Chapter always starts at InstPC 0 and EncodedInfo index 0. Hence, we do not emit + a Chapter entry for the 1st Chapter. As a result, for most small functions that do not have + more than numberOfWordsBetweenChapters EncodedInfo, there is no Chapter info in the + ExpressionInfo. + + Additional Compression Details (of Basic word fields) + ===================================================== + + 1. Column Reset on Line change + + When Line changes from the last Entry, we always reset the cumulative Column to 0. This makes + sense because Column is relative to the start of the Line. Without this reset, the delta for + Column may be some huge negative value (which hurts compression). + + 2. Column same as Divot + + Column often has the same value as Divot.
This is because a lot of websites use compacted + and obfuscated JS code in a single line. Therefore, the column position is exactly the Divot + position. So, the Basic word reserves the value sameAsDivotValue (currently 0b1111) as an + indicator that the Column delta to apply is the same as the Divot delta for this Entry. This + reduces the number of Wide values we need to encode for both fields when the Divot delta is large. + + 3. Biased fields + + Divot, Line, and Column deltas are signed ints, not unsigned. This is because the evaluation + of an expression isn't in sequential order. For example, consider this expression: + + foo(bar()) + ^ ^-------- Divot, Line, and Column for bar() + `------------ Divot, Line, and Column for foo() + + The order of evaluation is first to evaluate the call to bar() followed by the call to foo(). + As a result, the Divot delta for the call to foo() is a negative value relative to the Divot + for the call to bar(). Similarly, this is true for Line and Column. + + Since the delta values for these 3 fields are signed, instead of storing an unsigned value at + their bitfield positions in the Basic word, we store a biased value: Divot + divotBias, + Line + lineBias, and Column + columnBias. This maps the values into an unsigned range, and makes it + easy to do a bounds check against the max capacity of the bitfield. + + Similarly, ExpressionInfo::Diff, which is used to track the delta for each field, has signed int + for these fields. This is in contrast to Entry and LineColumn where these fields are stored + as unsigned. + + Backing Store and Shape + ======================= + The ExpressionInfo and its backing store (with the exception of the contents of the + LineColumnMap cache) is allocated as a contiguous slab. We first compute the size of the slab, + then allocate it, and lastly use placement new to instantiate the ExpressionInfo. + + The shape of ExpressionInfo looks like this: + + ExpressionInfo: [ m_cachedLineColumns ] + [ m_numberOfChapters ] + [ m_numberOfEncodedInfo ] + [ m_numberOfEncodedInfoExtensions ] + Chapters Start: [ chapters()[0] ] + ... + [ chapters()[m_numberOfChapters - 1] ] + EncodedInfo Start: [ encodedInfo()[0] ] + ... + [ encodedInfo()[m_numberOfEncodedInfo - 1] ] + Extensions Start: [ encodedInfo()[m_numberOfEncodedInfo] ] + ... + [ encodedInfo()[m_numberOfEncodedInfo + m_numberOfEncodedInfoExtensions - 1] ] + Extensions End: +*/ + +struct ExpressionInfo::Diff { + using FieldID = ExpressionInfo::FieldID; + + template<typename T> + void set(FieldID fieldID, unsigned value) + { + switch (fieldID) { + case FieldID::InstPC: + instPC += cast<T>(value); + break; + case FieldID::Divot: + divot += cast<T>(value); + break; + case FieldID::Start: + start += cast<T>(value); + break; + case FieldID::End: + end += cast<T>(value); + break; + case FieldID::Line: + line += cast<T>(value); + break; + case FieldID::Column: + column += cast<T>(value); + break; + } + } + + void reset() + { + instPC = 0; + divot = 0; + start = 0; + end = 0; + line = 0; + column = 0; + } + + unsigned instPC { 0 }; + int divot { 0 }; + unsigned start { 0 }; + unsigned end { 0 }; + int line { 0 }; + int column { 0 }; +}; + +// The type for divot, line, and column is intentionally int, not unsigned. These are +// diff values which can be negative. These asserts are just here to draw attention to +// this comment in case anyone naively changes their type.
+static_assert(std::is_same_v<decltype(ExpressionInfo::Diff::divot), int>); +static_assert(std::is_same_v<decltype(ExpressionInfo::Diff::line), int>); +static_assert(std::is_same_v<decltype(ExpressionInfo::Diff::column), int>); + +bool ExpressionInfo::EncodedInfo::isAbsInstPC() const +{ + if (value < (specialHeader << headerShift)) + return false; + bool isMulti = (value >> multiBitShift) & 1; + return !isMulti; +} + +bool ExpressionInfo::EncodedInfo::isExtension() const +{ + if (value < (specialHeader << headerShift)) + return false; + bool isMulti = (value >> multiBitShift) & 1; + return isMulti; +} + +auto ExpressionInfo::Encoder::encodeAbsInstPC(InstPC absInstPC) -> EncodedInfo +{ + unsigned word = specialHeader << headerShift + | 0 << multiBitShift + | (absInstPC & maxSpecialValue) << specialValueShift; + return { word }; +} + +auto ExpressionInfo::Encoder::encodeExtension(unsigned offset) -> EncodedInfo +{ + RELEASE_ASSERT((offset & maxSpecialValue) == offset); + unsigned word = specialHeader << headerShift + | 1 << multiBitShift + | (offset & maxSpecialValue) << specialValueShift; + return { word }; +} + +auto constexpr ExpressionInfo::Encoder::encodeExtensionEnd() -> EncodedInfo +{ + unsigned word = wideHeader << headerShift + | 0 << multiBitShift + | static_cast<unsigned>(invalidFieldID) << firstFieldIDShift; + return { word }; +} + +auto ExpressionInfo::Encoder::encodeSingle(FieldID fieldID, unsigned value) -> EncodedInfo +{ + unsigned word = wideHeader << headerShift + | 0 << multiBitShift + | static_cast<unsigned>(fieldID) << firstFieldIDShift + | (value & maxSingleValue) << singleValueShift; + return { word }; +} + +auto ExpressionInfo::Encoder::encodeDuo(FieldID fieldID1, unsigned value1, FieldID fieldID2, unsigned value2) -> EncodedInfo +{ + unsigned word = wideHeader << headerShift + | 1 << multiBitShift + | static_cast<unsigned>(fieldID1) << firstFieldIDShift + | (value1 & maxDuoValue) << duoFirstValueShift + | static_cast<unsigned>(fieldID2) << duoSecondFieldIDShift + | (value2 & maxDuoValue) << duoSecondValueShift; + return { word }; +} + +auto ExpressionInfo::Encoder::encodeMultiHeader(unsigned numWides, Wide* wides) -> EncodedInfo +{ + unsigned word = wideHeader << headerShift + | 1 << multiBitShift + | static_cast<unsigned>(invalidFieldID) << firstFieldIDShift + | numWides << multiSizeShift; + unsigned fieldShift = multiFirstFieldShift; + for (unsigned i = 0; i < numWides; ++i) { + word |= static_cast<unsigned>(wides[i].fieldID) << fieldShift; + fieldShift -= fieldIDBits; + } + return { word }; +} + +auto ExpressionInfo::Encoder::encodeBasic(const Diff& diff) -> EncodedInfo +{ + ASSERT(diff.instPC <= maxInstPCValue); + ASSERT(diff.start <= maxStartValue); + ASSERT(diff.end <= maxEndValue); + unsigned biasedDivot = diff.divot + divotBias; + unsigned biasedLine = diff.line + lineBias; + unsigned biasedColumn = diff.column == INT_MAX ?
sameAsDivotValue : diff.column + columnBias; + + ASSERT(biasedDivot <= maxBiasedDivotValue); + ASSERT(biasedLine <= maxBiasedLineValue); + ASSERT(biasedColumn <= maxBiasedColumnValue || (diff.column == INT_MAX && biasedColumn == sameAsDivotValue)); + + unsigned word = diff.instPC << instPCShift | biasedDivot << divotShift + | diff.start << startShift | diff.end << endShift + | biasedLine << lineShift | biasedColumn << columnShift; + return { word }; +} + +void ExpressionInfo::Encoder::adjustInstPC(EncodedInfo* info, unsigned instPCDelta) +{ + unsigned infoIndex = info - &m_expressionInfoEncodedInfo[0]; + auto* firstInfo = info; + unsigned firstValue = firstInfo->value; + + unsigned headerBits = firstValue >> headerShift; + bool isMulti = (firstValue >> multiBitShift) & 1; + unsigned firstFieldIDBits = (firstValue >> firstFieldIDShift) & fieldIDMask; + + bool isBasic = false; + + if (headerBits == specialHeader) { + // Handle AbsInstPC. + ASSERT(!isMulti); // firstWord cannot already be an Extension word. + unsigned instPC = cast(firstValue); + unsigned updatedInstPC = instPC + instPCDelta; + if (fits(updatedInstPC)) { + *firstInfo = encodeAbsInstPC(updatedInstPC); + return; + } + goto emitExtension; + } + + if (headerBits == wideHeader) { + if (!isMulti) { + // Handle SingleWide. + ASSERT(firstFieldIDBits != invalidFieldID); // firstWord cannot already be an ExtensionEnd word. + FieldID fieldID = static_cast(firstFieldIDBits); + unsigned candidateInstPC = cast(firstValue); + unsigned updatedInstPC = candidateInstPC + instPCDelta; + if (fieldID == FieldID::InstPC && fits(updatedInstPC)) { + *firstInfo = encodeSingle(FieldID::InstPC, updatedInstPC); + return; + } + goto emitExtension; + + } + + if (firstFieldIDBits != invalidFieldID) { + // Handle DuoWide. + ASSERT(firstFieldIDBits != invalidFieldID); // firstWord cannot already be an ExtensionEnd word. + FieldID fieldID = static_cast(firstFieldIDBits); + unsigned candidateInstPC = cast(firstValue >> duoFirstValueShift); + unsigned updatedInstPC = candidateInstPC + instPCDelta; + if (fieldID == FieldID::InstPC && fits(updatedInstPC)) { + FieldID fieldID2 = static_cast((firstValue >> duoSecondFieldIDShift) & fieldIDMask); + unsigned value2 = cast(firstValue >> duoSecondValueShift); + *firstInfo = encodeDuo(FieldID::InstPC, updatedInstPC, fieldID2, value2); + return; + } + goto emitExtension; + } + + // Handle MultiWide. + FieldID firstMultiFieldID = static_cast((firstValue >> multiFirstFieldShift) & fieldIDMask); + if (firstMultiFieldID == FieldID::InstPC) { + unsigned instPC = m_expressionInfoEncodedInfo[infoIndex + 1].value; + unsigned updatedInstPC = instPC + instPCDelta; + m_expressionInfoEncodedInfo[infoIndex + 1].value = updatedInstPC; + return; + } + + // We can't just move the MultiWide header to the extension: we have to move the + // whole MultiWide record (i.e. multiple words). The Decoder relies on them + // being contiguous. + unsigned numberOfFields = (firstValue >> multiSizeShift) & multiSizeMask; + + m_expressionInfoEncodedInfo.append({ firstValue }); // MultiWide header. + for (unsigned i = 1; i < numberOfFields; ++i) { + m_expressionInfoEncodedInfo.append(firstInfo[i]); + firstInfo[i] = encodeSingle(FieldID::InstPC, 0); // Replace with a no-op. + } + // Save the last field in firstValue, and let the extension emitter below append it. + firstValue = firstInfo[numberOfFields].value; + firstInfo[numberOfFields] = encodeSingle(FieldID::InstPC, 0); // Replace with a no-op. + goto emitExtension; + } + + // Handle Basic. 
+ { + unsigned instPC = cast(firstValue >> instPCShift); + unsigned updatedInstPC = instPC + instPCDelta; + if (updatedInstPC < maxInstPCValue) { + unsigned replacement = firstValue & ((1u << instPCShift) - 1); + replacement |= updatedInstPC << instPCShift; + *firstInfo = { replacement }; + return; + } + } + isBasic = true; + +emitExtension: + unsigned extensionOffset = m_expressionInfoEncodedInfo.size() - infoIndex; + m_expressionInfoEncodedInfo[infoIndex] = encodeExtension(extensionOffset); + + // Because the Basic word is used as a terminator for the current Entry, + // if the firstValue is a Basic word, it needs to come last. Otherwise, we should + // just emit firstValue first. AbsInstPC and MultiWide rely on this for correctness. + if (!isBasic) + m_expressionInfoEncodedInfo.append({ firstValue }); + + if (fits(instPCDelta)) + m_expressionInfoEncodedInfo.append(encodeSingle(FieldID::InstPC, instPCDelta)); + else { + // The wides array is really only to enable us to use encodeMultiHeader. Hence, + // we don't really need to store instPCDelta as the value here. It can be any value + // since it's not used. However, to avoid confusion, we'll just populate it consistently. + Wide wides[1] = { { instPCDelta, FieldID::InstPC } }; + m_expressionInfoEncodedInfo.append(encodeMultiHeader(1, wides)); + m_expressionInfoEncodedInfo.append({ instPCDelta }); + } + + if (isBasic) { + // If we're terminating with the Basic word, then we don't need the + // ExtensionEnd because the Basic word is an implied end. + m_expressionInfoEncodedInfo.append({ firstValue }); + } else + m_expressionInfoEncodedInfo.append(encodeExtensionEnd()); +} + +void ExpressionInfo::Encoder::encode(InstPC instPC, unsigned divot, unsigned startOffset, unsigned endOffset, LineColumn lineColumn) +{ + unsigned numWides = 0; + std::array wides; + + auto appendWide = [&] (FieldID id, unsigned value) { + wides[numWides++] = { value, id }; + }; + + unsigned currentEncodedInfoIndex = m_expressionInfoEncodedInfo.size(); + unsigned chapterSize = currentEncodedInfoIndex - m_currentChapterStartIndex; + if (chapterSize >= numberOfWordsBetweenChapters) { + m_expressionInfoChapters.append({ instPC, currentEncodedInfoIndex }); + m_currentChapterStartIndex = currentEncodedInfoIndex; + unsigned absInstPC = std::min(instPC, maxSingleValue); + m_expressionInfoEncodedInfo.append(encodeAbsInstPC(absInstPC)); + m_entry.reset(); + m_entry.instPC = absInstPC; + } + + Diff diff; + diff.instPC = instPC - m_entry.instPC; + diff.divot = divot - m_entry.divot; + diff.start = startOffset; + diff.end = endOffset; + + diff.line = lineColumn.line - m_entry.lineColumn.line; + if (diff.line) + m_entry.lineColumn.column = 0; + + diff.column = lineColumn.column - m_entry.lineColumn.column; + + bool sameDivotAndColumnDiff = diff.column == diff.divot; + + // Divot, line, and column diffs can have negative values. To maximize the chance that they fit + // in a Basic word, we apply a bias to these values. InstPC is always monotonically increasing, + // i.e. its diff is always positive and unsigned. Start and end are already relative to divot, + // i.e. their diffs are always positive and unsigned. Hence, instPC, start, and end do not + // require a bias. 
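As a concrete illustration of the per-field checks that follow, here is a standalone sketch (not part of the patch; fitsBasic is an illustrative name). With divotBits = 7 and divotBias = 64 (per ExpressionInfo.h), only raw Divot deltas in [-64, 63] fit the Basic word; anything outside that range escapes to a Wide word and the Basic field carries 0:

    #include <cassert>

    int main()
    {
        constexpr unsigned divotBits = 7;
        constexpr unsigned divotBias = (1u << divotBits) / 2;           // 64
        constexpr unsigned maxBiasedDivotValue = (1u << divotBits) - 1; // 127

        auto fitsBasic = [=](int divotDelta) {
            // Same comparison as "diff.divot + divotBias > maxBiasedDivotValue" below;
            // a negative delta below -64 wraps to a large unsigned value and fails.
            return divotDelta + divotBias <= maxBiasedDivotValue;
        };

        assert(fitsBasic(-64) && fitsBasic(0) && fitsBasic(63));
        assert(!fitsBasic(-65) && !fitsBasic(64)); // these get appendWide(FieldID::Divot, ...)
        return 0;
    }
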
+ + // Encode instPC: + if (diff.instPC > maxInstPCValue) { + appendWide(FieldID::InstPC, diff.instPC); + diff.instPC = 0; + } + + // Encode divot: + if (diff.divot + divotBias > maxBiasedDivotValue) { + appendWide(FieldID::Divot, diff.divot); + diff.divot = 0; + } + + // Encode start: + if (diff.start > maxStartValue) { + appendWide(FieldID::Start, diff.start); + diff.start = 0; + } + + // Encode end: + if (diff.end > maxEndValue) { + appendWide(FieldID::End, diff.end); + diff.end = 0; + } + + // Encode line: + if (diff.line + lineBias > maxBiasedLineValue) { + appendWide(FieldID::Line, diff.line); + diff.line = 0; + } + + // Encode column: + if (sameDivotAndColumnDiff) + diff.column = INT_MAX; + else if (diff.column + columnBias > maxBiasedColumnValue) { + appendWide(FieldID::Column, diff.column); + diff.column = 0; + } + + m_entry.instPC = instPC; + m_entry.divot = divot; + m_entry.lineColumn = lineColumn; + + // Canonicalize the wide EncodedInfo. + { + unsigned lastDuoIndex = numWides; + unsigned numDuoWides = 0; + + // We want to process the InstPC wide (if present) last. This enables an InstPC wide to be emitted + // first (if possible) to simplify the remap logic in adjustInstPC(). adjustInstPC() assumes that + // the InstPC wide (if present) will likely be in the first word. + for (unsigned i = numWides; i--; ) { + auto& wide = wides[i]; + if (fits(wide)) { + wide.order = Wide::SortOrder::Duo; + numDuoWides++; + lastDuoIndex = i; + } else if (fits(wide)) + wide.order = Wide::SortOrder::Single; + else + wide.order = Wide::SortOrder::Multi; + } + + if (numDuoWides & 1) + wides[lastDuoIndex].order = Wide::SortOrder::Single; + + auto* widesData = wides.data(); + std::sort(widesData, widesData + numWides, [] (auto& a, auto& b) { + if (static_cast(a.order) < static_cast(b.order)) + return true; + if (static_cast(a.order) == static_cast(b.order)) + return a.fieldID < b.fieldID; + return false; + }); + + } + + // Emit the wide EncodedInfo. + for (unsigned i = 0; i < numWides; ++i) { + auto& wide = wides[i]; + + if (wide.order == Wide::SortOrder::Single) { + m_expressionInfoEncodedInfo.append(encodeSingle(wide.fieldID, wide.value)); + continue; + } + + if (wide.order == Wide::SortOrder::Duo) { + auto& wide2 = wides[++i]; + ASSERT(fits(wide)); + ASSERT(fits(wide2)); + m_expressionInfoEncodedInfo.append(encodeDuo(wide.fieldID, wide.value, wide2.fieldID, wide2.value)); + continue; + } + + ASSERT(wide.order == Wide::SortOrder::Multi); + unsigned remainingWides = numWides - i; + m_expressionInfoEncodedInfo.append(encodeMultiHeader(remainingWides, &wide)); + while (i < numWides) + m_expressionInfoEncodedInfo.append({ wides[i++].value }); + } + + m_expressionInfoEncodedInfo.append(encodeBasic(diff)); +} + +template +bool ExpressionInfo::Encoder::fits(Wide wide) +{ + switch (wide.fieldID) { + case FieldID::InstPC: + case FieldID::Start: + case FieldID::End: + return fits(wide.value); + case FieldID::Divot: + case FieldID::Line: + case FieldID::Column: + return fits(wide.value); + } + return false; // placate GCC. 
+} + +template +bool ExpressionInfo::Encoder::fits(T value) +{ + struct Caster { + T value : bitCount; + } caster; + caster.value = value; + return caster.value == value; +} + +MallocPtr ExpressionInfo::Encoder::createExpressionInfo() +{ + size_t numberOfChapters = m_expressionInfoChapters.size(); + size_t numberOfEncodedInfo = m_expressionInfoEncodedInfo.size() - m_numberOfEncodedInfoExtensions; + size_t totalSize = ExpressionInfo::totalSizeInBytes(numberOfChapters, numberOfEncodedInfo, m_numberOfEncodedInfoExtensions); + auto info = MallocPtr::malloc(totalSize); + new (info.get()) ExpressionInfo(WTFMove(m_expressionInfoChapters), WTFMove(m_expressionInfoEncodedInfo), m_numberOfEncodedInfoExtensions); + return info; +} + +ExpressionInfo::Decoder::Decoder(const ExpressionInfo& expressionInfo) + : m_startInfo(expressionInfo.encodedInfo()) + , m_endInfo(expressionInfo.endEncodedInfo()) + , m_endExtensionInfo(expressionInfo.endExtensionEncodedInfo()) + , m_currentInfo(m_startInfo) + , m_nextInfo(m_startInfo) +{ +} + +// This constructor is only used by Encoder for remapping encodedInfo. +ExpressionInfo::Decoder::Decoder(Vector& encodedInfoVector) + : m_startInfo(encodedInfoVector.begin()) + , m_endInfo(encodedInfoVector.end()) + , m_endExtensionInfo(encodedInfoVector.end()) + , m_currentInfo(m_startInfo) + , m_nextInfo(m_startInfo) +{ +} + +void ExpressionInfo::Decoder::recacheInfo(Vector& encodedInfoVector) +{ + if (m_endInfo == encodedInfoVector.end()) + return; // Did not resize, i.e. nothing changed. + + m_currentInfo = &encodedInfoVector[m_currentInfo - m_startInfo]; + m_nextInfo = &encodedInfoVector[m_nextInfo - m_startInfo]; + + m_endInfo = &encodedInfoVector[m_endInfo - m_startInfo]; + m_startInfo = encodedInfoVector.begin(); + m_endExtensionInfo = encodedInfoVector.end(); +} + +template +T ExpressionInfo::cast(unsigned value) +{ + struct Caster { + T value : bitCount; + }; + Caster caster; + caster.value = value; + return caster.value; +} + +bool ExpressionInfo::isSpecial(unsigned value) +{ + return value >= (specialHeader << headerShift); +} + +bool ExpressionInfo::isWideOrSpecial(unsigned value) +{ + return value >= (wideHeader << headerShift); +} + +IterationStatus ExpressionInfo::Decoder::decode(std::optional targetInstPC) +{ + m_currentInfo = m_nextInfo; // Go decode the next Entry. + + ASSERT(m_currentInfo <= m_endInfo); + ASSERT(m_endInfo <= m_endExtensionInfo); + auto* currentInfo = m_currentInfo; + if (currentInfo == m_endInfo) + return IterationStatus::Done; + + Diff diff; + + unsigned value = currentInfo->value; + + EncodedInfo* savedInfo = nullptr; + bool hasAbsInstPC = false; + InstPC currentInstPC = m_entry.instPC; + + // Decode wide words. + while (isWideOrSpecial(value)) { + bool isSpecial = ExpressionInfo::isSpecial(value); + bool isMulti = (value >> multiBitShift) & 1; + unsigned firstFieldIDBits = (value >> firstFieldIDShift) & fieldIDMask; + + if (isSpecial) { + if (isMulti) { + // Decode Extension word. + unsigned extensionOffset = cast(value); + savedInfo = currentInfo; + currentInfo += extensionOffset - 1; // -1 to compensate for the increment below. + ASSERT(currentInfo >= m_endInfo - 1); + ASSERT(currentInfo < m_endExtensionInfo); + + } else { + // Decode AbsInstPC word. + ASSERT(currentInfo == m_currentInfo); + + // We can't call m_entry.reset() here because we always scan up to the entry + // above the one that we're looking for before declaring Done. 
Hence, we have + to defer any changes to m_entry until we know that the current entry does + not exceed what we're looking for, and that we can commit it. + hasAbsInstPC = true; + currentInstPC = 0; + diff.reset(); + diff.set(FieldID::InstPC, value); + } + + } else if (firstFieldIDBits == invalidFieldID && !isMulti) { + // Decode ExtensionEnd word. + ASSERT(savedInfo); + currentInfo = savedInfo; + // We need to clear savedInfo to indicate that we terminated the Extension with + // ExtensionEnd. Otherwise, we need to restore currentInfo after we decode the Basic word + // terminator. + savedInfo = nullptr; + + } else if (firstFieldIDBits == invalidFieldID) { + // Decode MultiWide word. + unsigned numberOfFields = (value >> multiSizeShift) & multiSizeMask; + unsigned fieldShift = multiFirstFieldShift; + for (unsigned i = 0; i < numberOfFields; ++i) { + ++currentInfo; + auto fieldID = static_cast((value >> fieldShift) & fieldIDMask); + diff.set(fieldID, currentInfo->value); + fieldShift -= fieldIDBits; + } + + } else if (isMulti) { + // Decode DuoWide word. + auto fieldID1 = static_cast(firstFieldIDBits); + auto fieldID2 = static_cast((value >> duoSecondFieldIDShift) & fieldIDMask); + diff.set(fieldID1, value >> duoFirstValueShift); + diff.set(fieldID2, value >> duoSecondValueShift); + + } else { + // Decode SingleWide word. + diff.set(static_cast(firstFieldIDBits), value); + } + + ++currentInfo; + value = currentInfo->value; + } + + // Decode Basic word. + // We check the bounds against m_endExtensionInfo here because the Basic word may be in + // the extensions section. + ASSERT(currentInfo < m_endExtensionInfo); + + diff.instPC += value >> instPCShift; + currentInstPC += diff.instPC; + + IterationStatus status = IterationStatus::Continue; + + // We want to find the entry whose InstPC is closest to the targetInstPC without exceeding it. + // This means, by necessity, that we must always decode the next one above it before we + // know that we're done. If the current decode exceeds the target, then we need to + // abort immediately and not commit any changes to m_entry. + // + // The only exception to this is that we need to at least decode 1 entry before + // calling it quits. This only applies to opcodes at the start of the function before + // the first ExpressionInfo entry. Our historical convention is to map those to the + // first entry. The m_hasDecodedFirstEntry flag helps us achieve this. + + if (targetInstPC && m_hasDecodedFirstEntry && currentInstPC > targetInstPC.value()) { + // We're done because we have reached our targetInstPC. + status = IterationStatus::Done; + + } else { + m_hasDecodedFirstEntry = true; + + if (hasAbsInstPC) + m_entry.reset(); + m_entry.instPC = currentInstPC; + + diff.divot += cast((value >> divotShift) - divotBias); + m_entry.divot += diff.divot; + + // Unlike other values, startOffset and endOffset are always relative + // to the divot. Hence, they are never cumulative relative to the last expression + // info entry. + static constexpr unsigned startMask = (1 << startBits) - 1; + diff.start += (value >> startShift) & startMask; + m_entry.startOffset = diff.start; // Not cumulative. + + static constexpr unsigned endMask = (1 << endBits) - 1; + diff.end += (value >> endShift) & endMask; + m_entry.endOffset = diff.end; // Not cumulative. 
+ + diff.line += cast((value >> lineShift) - lineBias); + if (diff.line) + m_entry.lineColumn.column = 0; + m_entry.lineColumn.line += diff.line; + + static constexpr unsigned columnMask = (1 << columnBits) - 1; + + unsigned columnField = (value >> columnShift) & columnMask; + diff.column += columnField == sameAsDivotValue ? diff.divot : cast(columnField - columnBias); + m_entry.lineColumn.column += diff.column; + } + + if (savedInfo) { + // We got here because we are terminating an Extension with a Basic word. + // So, we have to restore currentInfo for the next Entry. + currentInfo = savedInfo; + } + + m_nextInfo = ++currentInfo; // This is where the next Entry to decode will start. + return status; +} + +MallocPtr ExpressionInfo::createUninitialized(unsigned numberOfChapters, unsigned numberOfEncodedInfo, unsigned numberOfEncodedInfoExtensions) +{ + size_t totalSize = ExpressionInfo::totalSizeInBytes(numberOfChapters, numberOfEncodedInfo, numberOfEncodedInfoExtensions); + auto info = MallocPtr::malloc(totalSize); + new (info.get()) ExpressionInfo(numberOfChapters, numberOfEncodedInfo, numberOfEncodedInfoExtensions); + return info; +} + +ExpressionInfo::ExpressionInfo(unsigned numberOfChapters, unsigned numberOfEncodedInfo, unsigned numberOfEncodedInfoExtensions) + : m_numberOfChapters(numberOfChapters) + , m_numberOfEncodedInfo(numberOfEncodedInfo) + , m_numberOfEncodedInfoExtensions(numberOfEncodedInfoExtensions) +{ +} + +ExpressionInfo::ExpressionInfo(Vector&& chapters, Vector&& encodedInfo, unsigned numberOfEncodedInfoExtensions) + : ExpressionInfo(chapters.size(), encodedInfo.size() - numberOfEncodedInfoExtensions, numberOfEncodedInfoExtensions) +{ + std::uninitialized_copy(chapters.begin(), chapters.end(), this->chapters()); + std::uninitialized_copy(encodedInfo.begin(), encodedInfo.end(), this->encodedInfo()); +} + +size_t ExpressionInfo::byteSize() const +{ + return totalSizeInBytes(m_numberOfChapters, m_numberOfEncodedInfo, m_numberOfEncodedInfoExtensions); +} + +auto ExpressionInfo::lineColumnForInstPC(InstPC instPC) -> LineColumn +{ + auto iter = m_cachedLineColumns.find(instPC); + if (iter != m_cachedLineColumns.end()) + return iter->value; + + auto entry = entryForInstPC(instPC); + m_cachedLineColumns.add(instPC, entry.lineColumn); + return entry.lineColumn; +} + +auto ExpressionInfo::findChapterEncodedInfoJustBelow(InstPC instPC) const -> EncodedInfo* +{ + auto* chapters = this->chapters(); + unsigned low = 0; + unsigned high = m_numberOfChapters; + while (low < high) { + unsigned mid = (low + high) / 2; + if (chapters[mid].startInstPC <= instPC) + low = mid + 1; + else + high = mid; + } + + unsigned startIndex = 0; + unsigned endIndex = m_numberOfEncodedInfo; + + if (low) { + auto& chapter = chapters[low - 1]; + startIndex = chapter.startEncodedInfoIndex; + } + if (low + 1 < m_numberOfChapters) + endIndex = chapters[low].startEncodedInfoIndex; + + ASSERT_UNUSED(endIndex, startIndex <= endIndex); + return &encodedInfo()[startIndex]; +} + +auto ExpressionInfo::entryForInstPC(InstPC instPC) -> Entry +{ + Decoder decoder(*this); + + auto* chapterStart = findChapterEncodedInfoJustBelow(instPC); + decoder.setNextInfo(chapterStart); + while (decoder.decode(instPC) != IterationStatus::Done) { } + return decoder.entry(); +} + +template +void ExpressionInfo::print(PrintStream& out, FieldID fieldID, unsigned value) +{ + switch (fieldID) { + case FieldID::InstPC: + case FieldID::Start: + case FieldID::End: + out.print(cast(value)); + break; + case FieldID::Divot: + case FieldID::Line: 
+ case FieldID::Column: + out.print(cast(value)); + break; + } +} + + +void ExpressionInfo::dumpEncodedInfo(ExpressionInfo::EncodedInfo* start, ExpressionInfo::EncodedInfo* end) +{ + StringPrintStream out; + + EncodedInfo* curr = start; + for (unsigned index = 0; curr < end; ++index) { + unsigned value = curr->value; + out.print(" [", index, "] ", RawPointer(curr), ": ", RawHex(value)); + + if (isWideOrSpecial(value)) { + bool isSpecial = ExpressionInfo::isSpecial(value); + bool isMulti = (value >> multiBitShift) & 1; + unsigned firstFieldIDBits = (value >> firstFieldIDShift) & fieldIDMask; + + if (isSpecial) { + unsigned payload = cast(value); + if (isMulti) + out.println(" EXT ", payload); // Extension word. + else + out.println(" ABS ", payload); // AbsInstPC word. + + } else if (firstFieldIDBits == invalidFieldID && !isMulti) { + out.println(" XND"); // ExtensionEnd word. + + } else if (firstFieldIDBits == invalidFieldID) { + // MultiWide word. + unsigned numberOfFields = (value >> multiSizeShift) & multiSizeMask; + unsigned fieldShift = multiFirstFieldShift; + + out.print(" MLT ", RawHex(value), " numFields ", numberOfFields); + for (unsigned i = 1; i <= numberOfFields; ++i) { + auto fieldID = static_cast((value >> fieldShift) & fieldIDMask); + out.print(" | ", fieldID); + fieldShift -= fieldIDBits; + } + out.println(); + + // MultiWide fields. + fieldShift = multiFirstFieldShift; + for (unsigned i = 0; i < numberOfFields; ++i) { + auto fieldID = static_cast((value >> fieldShift) & fieldIDMask); + out.print(" [", i, "] ", RawPointer(&curr[i + 1]), ": ", fieldID, " ", curr[i + 1].value); + fieldShift -= fieldIDBits; + } + curr += numberOfFields; + index += numberOfFields; + + } else if (isMulti) { + // DuoWide word. + auto fieldID1 = static_cast(firstFieldIDBits); + auto fieldID2 = static_cast((value >> duoSecondFieldIDShift) & fieldIDMask); + + out.print(" DUO ", fieldID1, " "); + print(out, fieldID1, value >> duoFirstValueShift); + out.print(" ", fieldID2, " "); + print(out, fieldID2, value >> duoSecondValueShift); + out.println(); + + } else { + // SingleWide word. + auto fieldID = static_cast(firstFieldIDBits); + + out.print(" SNG ", fieldID, " "); + print(out, fieldID, value); + out.println(); + } + } else { + out.println(" BSC ", + FieldID::InstPC, " ", cast(value >> instPCShift), " ", + FieldID::Divot, " ", cast(value >> divotShift), " ", + FieldID::Start, " ", cast(value >> startShift), " ", + FieldID::End, " ", cast(value >> endShift), " ", + FieldID::Line, " ", cast(value >> lineShift), " ", + FieldID::Column, " ", cast(value >> columnShift)); + } + curr++; + } + dataLogLn(out.toString()); +} + +} // namespace JSC + +namespace WTF { + +void printInternal(PrintStream& out, JSC::ExpressionInfo::FieldID fieldID) +{ + auto name = [] (auto fieldID) { + switch (fieldID) { + case JSC::ExpressionInfo::FieldID::InstPC: return "Inst"; + case JSC::ExpressionInfo::FieldID::Divot: return "Divot"; + case JSC::ExpressionInfo::FieldID::Start: return "Start"; + case JSC::ExpressionInfo::FieldID::End: return "End"; + case JSC::ExpressionInfo::FieldID::Line: return "Line"; + case JSC::ExpressionInfo::FieldID::Column: return "Column"; + } + return ""; // placate GCC. 
+ }; + out.print(name(fieldID)); +} + +} // namespace WTF diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/ExpressionInfo.h b/vendor/webkit/Source/JavaScriptCore/bytecode/ExpressionInfo.h new file mode 100644 index 00000000..88ac4aac --- /dev/null +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/ExpressionInfo.h @@ -0,0 +1,340 @@ +/* + * Copyright (C) 2024 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#include "LineColumn.h" +#include +#include +#include +#include +#include +#include + +namespace JSC { + +// See comment at the top of ExpressionInfo.cpp on how ExpressionInfo works. + +class ExpressionInfo { + WTF_MAKE_NONCOPYABLE(ExpressionInfo); + WTF_MAKE_FAST_ALLOCATED; +public: + enum class FieldID : uint8_t { InstPC, Divot, Start, End, Line, Column }; + + class Decoder; + + using InstPC = unsigned; // aka bytecode PC. + + static constexpr InstPC maxInstPC = std::numeric_limits::max(); + + struct Chapter { + InstPC startInstPC; + unsigned startEncodedInfoIndex; + }; + + struct Entry { + void reset() + { + instPC = 0; + lineColumn = { 0, 0 }; + divot = 0; + startOffset = 0; + endOffset = 0; + } + + InstPC instPC { 0 }; + LineColumn lineColumn; + unsigned divot { 0 }; + unsigned startOffset { 0 }; // This value is relative to divot. + unsigned endOffset { 0 }; // This value is relative to divot. + }; + + struct EncodedInfo { + bool isAbsInstPC() const; + bool isExtension() const; + unsigned value; + }; + + struct Diff; + + class Encoder { + public: + void encode(InstPC, unsigned divot, unsigned startOffset, unsigned endOffset, LineColumn); + + template + void remap(Vector&& adjustments, RemapFunc); + + Entry entry() const { return m_entry; } + + MallocPtr createExpressionInfo(); + + void dumpEncodedInfo() // For debugging use only. 
+ { + ExpressionInfo::dumpEncodedInfo(m_expressionInfoEncodedInfo.begin(), m_expressionInfoEncodedInfo.end()); + } + + private: + struct Wide { + enum class SortOrder : uint8_t { Single, Duo, Multi }; + + unsigned value; + FieldID fieldID; + SortOrder order { SortOrder::Multi }; + }; + + static EncodedInfo encodeAbsInstPC(InstPC absInstPC); + static EncodedInfo encodeExtension(unsigned offset); + static constexpr EncodedInfo encodeExtensionEnd(); + static EncodedInfo encodeSingle(FieldID, unsigned); + static EncodedInfo encodeDuo(FieldID, unsigned value1, FieldID, unsigned value2); + static EncodedInfo encodeMultiHeader(unsigned numWides, Wide*); + static EncodedInfo encodeBasic(const Diff&); + + void adjustInstPC(EncodedInfo*, unsigned instPCDelta); + + template bool fits(Wide); + template bool fits(T); + + Entry m_entry; + + unsigned m_currentChapterStartIndex { 0 }; + unsigned m_numberOfEncodedInfoExtensions { 0 }; + Vector m_expressionInfoChapters; + Vector m_expressionInfoEncodedInfo; + }; + + class Decoder { + public: + Decoder() = default; + Decoder(const ExpressionInfo&); + Decoder(Vector&); + + IterationStatus decode(std::optional targetInstPC = std::nullopt); + + void recacheInfo(Vector&); + EncodedInfo* currentInfo() const { return m_currentInfo; } + + // This is meant to be used to jump to the start of a chapter, where the encoder + // always starts over with no historical Entry values to compute diffs from. + // If you use this to point to any EncodedInfo other than the start of a chapter, + // then you're responsible for setting up the initial conditions of m_entry correctly + // a priori. + void setNextInfo(EncodedInfo* info) { m_nextInfo = info; } + + Entry entry() const { return m_entry; } + void setEntry(Entry entry) { m_entry = entry; } // For debugging use only. + + InstPC instPC() const { return m_entry.instPC; } + unsigned divot() const { return m_entry.divot; } + unsigned startOffset() const { return m_entry.startOffset; } + unsigned endOffset() const { return m_entry.endOffset; } + LineColumn lineColumn() const { return m_entry.lineColumn; } + + private: + struct Wide { + FieldID fieldID; + unsigned value; + }; + + void appendWide(FieldID id, unsigned value) + { + m_wides[m_numWides++] = { id, value }; + } + + unsigned m_word { 0 }; + + Entry m_entry; + EncodedInfo* m_startInfo { nullptr }; + EncodedInfo* m_endInfo { nullptr }; + EncodedInfo* m_endExtensionInfo { nullptr }; + EncodedInfo* m_currentInfo { nullptr }; + EncodedInfo* m_nextInfo { nullptr }; + bool m_hasDecodedFirstEntry { false }; + + unsigned m_numWides { 0 }; + std::array m_wides; + }; + + ~ExpressionInfo() = default; + + LineColumn lineColumnForInstPC(InstPC); + Entry entryForInstPC(InstPC); + + bool isEmpty() const { return !m_numberOfEncodedInfo; } + size_t byteSize() const; + + template + static void print(PrintStream&, FieldID, unsigned value); + static void dumpEncodedInfo(EncodedInfo* start, EncodedInfo* end); // For debugging use only. 
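A brief usage sketch (not part of the patch; sourcePositionFor is an illustrative name) showing how a caller resolves a bytecode PC through the public API declared above:

    #include "ExpressionInfo.h"

    // Illustrative helper: map a bytecode PC to its source position.
    static JSC::LineColumn sourcePositionFor(JSC::ExpressionInfo& info, JSC::ExpressionInfo::InstPC pc)
    {
        auto entry = info.entryForInstPC(pc);
        // startOffset/endOffset are relative to the divot, so the expression
        // spans [divot - startOffset, divot + endOffset] in the source text.
        unsigned rangeStart = entry.divot - entry.startOffset;
        unsigned rangeEnd = entry.divot + entry.endOffset;
        (void)rangeStart;
        (void)rangeEnd;
        // lineColumnForInstPC() memoizes its answer in m_cachedLineColumns.
        return info.lineColumnForInstPC(pc);
    }
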
+ +private: + ExpressionInfo(unsigned numberOfChapters, unsigned numberOfEncodedInfo, unsigned numberOfEncodedInfoExtensions); + ExpressionInfo(Vector&&, Vector&&, unsigned numberOfEncodedInfoExtensions); + + template static T cast(unsigned); + + static bool isSpecial(unsigned); + static bool isWideOrSpecial(unsigned); + + static size_t payloadSizeInBytes(size_t numChapters, size_t numberOfEncodedInfo, size_t numberOfEncodedInfoExtensions) + { + size_t size = numChapters * sizeof(Chapter) + (numberOfEncodedInfo + numberOfEncodedInfoExtensions) * sizeof(EncodedInfo); + return roundUpToMultipleOf(size); + } + + static size_t totalSizeInBytes(size_t numChapters, size_t numberOfEncodedInfo, size_t numberOfEncodedInfoExtensions) + { + return sizeof(ExpressionInfo) + payloadSizeInBytes(numChapters, numberOfEncodedInfo, numberOfEncodedInfoExtensions); + } + + EncodedInfo* findChapterEncodedInfoJustBelow(InstPC) const; + + Chapter* chapters() const + { + return bitwise_cast(this + 1); + } + + EncodedInfo* encodedInfo() const + { + return bitwise_cast(&chapters()[m_numberOfChapters]); + } + + EncodedInfo* endEncodedInfo() const + { + return &encodedInfo()[m_numberOfEncodedInfo]; + } + + EncodedInfo* endExtensionEncodedInfo() const + { + return &encodedInfo()[m_numberOfEncodedInfo + m_numberOfEncodedInfoExtensions]; + } + + size_t payloadSize() const + { + return payloadSizeInBytes(m_numberOfChapters, m_numberOfEncodedInfo, m_numberOfEncodedInfoExtensions) / sizeof(unsigned); + } + + unsigned* payload() const + { + return bitwise_cast(this + 1); + } + + static MallocPtr createUninitialized(unsigned numberOfChapters, unsigned numberOfEncodedInfo, unsigned numberOfEncodedInfoExtensions); + + static constexpr unsigned bitsPerWord = sizeof(unsigned) * CHAR_BIT; + + // Number of bits of each field in Basic encoding. + static constexpr unsigned instPCBits = 5; + static constexpr unsigned divotBits = 7; + static constexpr unsigned startBits = 6; + static constexpr unsigned endBits = 6; + static constexpr unsigned lineBits = 3; + static constexpr unsigned columnBits = 5; + static_assert(instPCBits + divotBits + startBits + endBits + lineBits + columnBits == bitsPerWord); + + // Bias values applied to the signed diff values; the bias makes it easier to do range checks on them. + static constexpr unsigned divotBias = (1 << divotBits) / 2; + static constexpr unsigned lineBias = (1 << lineBits) / 2; + static constexpr unsigned columnBias = (1 << columnBits) / 2; + + static constexpr unsigned instPCShift = bitsPerWord - instPCBits; + static constexpr unsigned divotShift = instPCShift - divotBits; + static constexpr unsigned startShift = divotShift - startBits; + static constexpr unsigned endShift = startShift - endBits; + static constexpr unsigned lineShift = endShift - lineBits; + static constexpr unsigned columnShift = lineShift - columnBits; + + static constexpr unsigned specialHeader = (1 << instPCBits) - 1; + static constexpr unsigned wideHeader = specialHeader - 1; + + static constexpr unsigned maxInstPCValue = wideHeader - 1; + static constexpr unsigned maxBiasedDivotValue = (1 << divotBits) - 1; + static constexpr unsigned maxStartValue = (1 << startBits) - 1; + static constexpr unsigned maxEndValue = (1 << endBits) - 1; + static constexpr unsigned maxBiasedLineValue = (1 << lineBits) - 1; + + static constexpr unsigned sameAsDivotValue = (1 << columnBits) - 1; + static constexpr unsigned maxBiasedColumnValue = sameAsDivotValue - 1; + + // Number of bits in Wide / Special encodings. 
+ static constexpr unsigned specialValueBits = 26; + static constexpr unsigned singleValueBits = 23; + static constexpr unsigned duoValueBits = 10; + static constexpr unsigned fullValueBits = 32; + static constexpr unsigned multiSizeBits = 5; + static constexpr unsigned fieldIDBits = 3; + + static constexpr unsigned maxSpecialValue = (1 << specialValueBits) - 1; + static constexpr unsigned maxSingleValue = (1 << singleValueBits) - 1; + static constexpr unsigned maxDuoValue = (1 << duoValueBits) - 1; + static constexpr unsigned invalidFieldID = (1 << fieldIDBits) - 1; + + static constexpr unsigned multiSizeMask = (1 << multiSizeBits) - 1; + static constexpr unsigned fieldIDMask = (1 << fieldIDBits) - 1; + + static constexpr unsigned headerShift = bitsPerWord - instPCBits; + static constexpr unsigned multiBitShift = headerShift - 1; + static_assert(headerShift == 27); + static_assert(multiBitShift == 26); + + static constexpr unsigned specialValueShift = multiBitShift - specialValueBits; + static_assert(!specialValueShift); + + static constexpr unsigned firstFieldIDShift = multiBitShift - fieldIDBits; + static constexpr unsigned singleValueShift = firstFieldIDShift - singleValueBits; + static_assert(!singleValueShift); + + static constexpr unsigned duoFirstValueShift = firstFieldIDShift - duoValueBits; + static constexpr unsigned duoSecondFieldIDShift = duoFirstValueShift - fieldIDBits; + static constexpr unsigned duoSecondValueShift = duoSecondFieldIDShift - duoValueBits; + static_assert(!duoSecondValueShift); + + static constexpr unsigned multiSizeShift = firstFieldIDShift - multiSizeBits; + static constexpr unsigned multiFirstFieldShift = multiSizeShift - fieldIDBits; + + static constexpr unsigned numberOfWordsBetweenChapters = 10000; + + using LineColumnMap = HashMap, WTF::UnsignedWithZeroKeyHashTraits>; + + mutable LineColumnMap m_cachedLineColumns; + unsigned m_numberOfChapters; + unsigned m_numberOfEncodedInfo; + unsigned m_numberOfEncodedInfoExtensions; + // Followed by the following trailing data, which is allocated inline but dynamically sized: + // Chapter chapters[numberOfChapters]; + // EncodedInfo encodedInfo[numberOfEncodedInfo + numberOfEncodedInfoExtensions]; + + friend class CachedExpressionInfo; +}; + +static_assert(roundUpToMultipleOf(sizeof(ExpressionInfo)) == sizeof(ExpressionInfo), "CachedExpressionInfo relies on this invariant"); + +} // namespace JSC + +namespace WTF { + +JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::ExpressionInfo::FieldID); + +} // namespace WTF diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/ExpressionInfoInlines.h b/vendor/webkit/Source/JavaScriptCore/bytecode/ExpressionInfoInlines.h new file mode 100644 index 00000000..91174719 --- /dev/null +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/ExpressionInfoInlines.h @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2024 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#include "ExpressionInfo.h" + +namespace JSC { + +template +void ExpressionInfo::Encoder::remap(Vector&& adjustmentLabelPoints, RemapFunc remapFunc) +{ + if (!adjustmentLabelPoints.size()) + return; // Nothing to adjust. + + // Pad the end with a value that exceeds all other bytecodeIndexes. + // This way, currentLabel below will always have a meaningful value + // to compare instPC against. + adjustmentLabelPoints.append(UINT_MAX); + + ExpressionInfo::Decoder decoder(m_expressionInfoEncodedInfo); + unsigned numEncodedInfo = m_expressionInfoEncodedInfo.size(); + + // These are the types of adjustments that we need to handle: + // 1. bytecode got inserted before a LabelPoint. + // 2. bytecode got inserted after the LabelPoint. + // 3. bytecode got deleted after the LabelPoint. + // + // This means that we only need to do a remap of InstPC for the following: + // + // a. the EncodedInfo Entry at a LabelPoint InstPC (due to (1) above). + // + // In this case, the InstPC increased. Our remap will add a delta for the increment. + // Since our EncodedInfo are expressed as deltas from the previous Entry, once an + // adjustment has been applied, subsequent entries will just pick it up for free. + // + // b. the EncodedInfo Entry right after the LabelPoint InstPC (due to (2) and (3) above). + // + // Inserting / Removing bytecode after the LabelPoint affects the InstPC of bytecode + // that follows the LabelPoint, starting with the bytecode immediately after. There may + // or may not be any ExpressionInfo Entry for these bytecodes. However, we can just be + // conservative, and go ahead and compute the remap for the next Entry anyway. After + // that, our delta accumulation scheme takes care of the rest. + // + // There's also a chance that the next Entry is already beyond the next LabelPoint. + // This is also fine because our remap is computed based on the absolute value of its + // InstPC, not its relative value. Hence, there is no adjustment error: we'll always + // get the correct remapped InstPC value. + // + // c. the EncodedInfo Entry that starts with an AbsInstPC. + // + // Above, we pointed out that because our EncodedInfo are expressed as deltas from + // the previous Entry, adjustments are picked up for free. There is one exception: + // AbsInstPC. AbsInstPC does not build on cumulative deltas. So, whenever we see an + // AbsInstPC, we must also remap it. 
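To illustrate the contract described above, here is a hypothetical call site (not from the patch): suppose a bytecode rewriter inserted two instructions at offset 10, so every InstPC at or beyond 10 shifts up by 2. The label-point vector and lambda shapes match how remapFunc(instPC) is invoked below; "encoder" stands for an in-scope ExpressionInfo::Encoder, and the element type of the label-point vector is assumed to be the unsigned InstPC:

    Vector<unsigned> labelPoints { 10 };  // LabelPoint InstPCs (WTF::Vector)
    auto remapFunc = [](unsigned oldInstPC) -> unsigned {
        return oldInstPC >= 10 ? oldInstPC + 2 : oldInstPC;
    };
    encoder.remap(WTFMove(labelPoints), remapFunc);
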
+ + unsigned adjustmentIndex = 0; + InstPC currentLabel = adjustmentLabelPoints[adjustmentIndex]; + bool needToAdjustLabelAfter = false; + unsigned cummulativeDelta = 0; + + while (decoder.decode() != IterationStatus::Done) { + bool isAbsInstPC = decoder.currentInfo()->isAbsInstPC(); + bool needRemap = isAbsInstPC; + + InstPC instPC = decoder.instPC(); + if (instPC >= currentLabel) { + needToAdjustLabelAfter = true; + needRemap = true; + currentLabel = adjustmentLabelPoints[++adjustmentIndex]; + + } else if (needToAdjustLabelAfter) { + needToAdjustLabelAfter = false; + needRemap = true; + } + + unsigned instPCDelta; + if (needRemap) { + if (isAbsInstPC) + cummulativeDelta = 0; + instPCDelta = remapFunc(instPC) - instPC - cummulativeDelta; + if (instPCDelta || isAbsInstPC) { + adjustInstPC(decoder.currentInfo(), instPCDelta); + + // adjustInstPC() may have resized and reallocated m_expressionInfoEncodedInfo. + // So, we need to re-compute endInfo. info will be re-computed at the top of the loop. + decoder.recacheInfo(m_expressionInfoEncodedInfo); + cummulativeDelta += instPCDelta; + } + } + } + m_numberOfEncodedInfoExtensions = m_expressionInfoEncodedInfo.size() - numEncodedInfo; + + // Now, let's remap the Chapter startInstPCs. Their startEncodedInfoIndex will not change because + // the above remap algorithm does in place remapping. + for (auto& chapter : m_expressionInfoChapters) + chapter.startInstPC = remapFunc(chapter.startInstPC); +} + +} // namespace JSC diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h b/vendor/webkit/Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h deleted file mode 100644 index 8f83527f..00000000 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright (C) 2012-2013, 2016 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#pragma once - -#include - -namespace JSC { - -struct ExpressionRangeInfo { - // Line and column values are encoded in 1 of 3 modes depending on the size - // of their values. These modes are: - // - // 1. FatLine: 22-bit line, 8-bit column. - // 2. FatColumn: 8-bit line, 22-bit column. - // 3. FatLineAndColumn: 32-bit line, 32-bit column. 
- // - // For the first 2 modes, the line and column will be encoded in the 30-bit - // position field in the ExpressionRangeInfo. For the FatLineAndColumn mode, - // the position field will hold an index into a FatPosition vector which - // holds the FatPosition records with the full 32-bit line and column values. - - enum { - FatLineMode, - FatColumnMode, - FatLineAndColumnMode - }; - - struct FatPosition { - uint32_t line; - uint32_t column; - }; - - enum { - FatLineModeLineShift = 8, - FatLineModeLineMask = (1 << 22) - 1, - FatLineModeColumnMask = (1 << 8) - 1, - FatColumnModeLineShift = 22, - FatColumnModeLineMask = (1 << 8) - 1, - FatColumnModeColumnMask = (1 << 22) - 1 - }; - - enum { - MaxOffset = (1 << 7) - 1, - MaxDivot = (1 << 25) - 1, - MaxFatLineModeLine = (1 << 22) - 1, - MaxFatLineModeColumn = (1 << 8) - 1, - MaxFatColumnModeLine = (1 << 8) - 1, - MaxFatColumnModeColumn = (1 << 22) - 1 - }; - - void encodeFatLineMode(unsigned line, unsigned column) - { - ASSERT(line <= MaxFatLineModeLine); - ASSERT(column <= MaxFatLineModeColumn); - position = ((line & FatLineModeLineMask) << FatLineModeLineShift | (column & FatLineModeColumnMask)); - } - - void encodeFatColumnMode(unsigned line, unsigned column) - { - ASSERT(line <= MaxFatColumnModeLine); - ASSERT(column <= MaxFatColumnModeColumn); - position = ((line & FatColumnModeLineMask) << FatColumnModeLineShift | (column & FatColumnModeColumnMask)); - } - - void decodeFatLineMode(unsigned& line, unsigned& column) const - { - line = (position >> FatLineModeLineShift) & FatLineModeLineMask; - column = position & FatLineModeColumnMask; - } - - void decodeFatColumnMode(unsigned& line, unsigned& column) const - { - line = (position >> FatColumnModeLineShift) & FatColumnModeLineMask; - column = position & FatColumnModeColumnMask; - } - - uint32_t instructionOffset : 25; - uint32_t startOffset : 7; - uint32_t divotPoint : 25; - uint32_t endOffset : 7; - uint32_t mode : 2; - uint32_t position : 30; -}; - -} // namespace JSC diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h b/vendor/webkit/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h index 0f411a68..510d0bfc 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * Copyright (C) 2013-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,6 +29,7 @@ #include "Operands.h" #include #include +#include namespace JSC { @@ -38,7 +39,7 @@ class CodeBlock; // Note: Full bytecode liveness does not track any information about the liveness of temps. // If you want tmp liveness for a checkpoint ask tmpLivenessForCheckpoint. class FullBytecodeLiveness { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(FullBytecodeLiveness); public: explicit FullBytecodeLiveness(size_t size) : m_usesBefore(size) diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/GetByStatus.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/GetByStatus.cpp index ed5aa9b0..aac52e1b 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/GetByStatus.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/GetByStatus.cpp @@ -312,78 +312,98 @@ GetByStatus GetByStatus::computeForStubInfoWithoutExitSiteFeedback(const Concurr // crash on null structure. 
return GetByStatus(JSC::slowVersion(summary), stubInfo); } - - ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor( - structure, access.conditionSet(), access.uid()); - - switch (complexGetStatus.kind()) { - case ComplexGetStatus::ShouldSkip: - continue; - - case ComplexGetStatus::TakesSlowPath: - return GetByStatus(JSC::slowVersion(summary), stubInfo); - - case ComplexGetStatus::Inlineable: { - std::unique_ptr callLinkStatus; - JSFunction* intrinsicFunction = nullptr; - CodePtr customAccessorGetter; - std::unique_ptr domAttribute; - bool haveDOMAttribute = false; - switch (access.type()) { - case AccessCase::Load: - case AccessCase::GetGetter: - case AccessCase::Miss: { - break; - } - case AccessCase::IntrinsicGetter: { - intrinsicFunction = access.as().intrinsicFunction(); - break; - } - case AccessCase::Getter: { - callLinkStatus = makeUnique(); - if (CallLinkInfo* callLinkInfo = access.as().callLinkInfo()) { - *callLinkStatus = CallLinkStatus::computeFor( - locker, profiledBlock, *callLinkInfo, callExitSiteData); - } - break; - } - case AccessCase::CustomAccessorGetter: { - customAccessorGetter = access.as().customAccessor(); - if (!access.as().domAttribute()) - return GetByStatus(JSC::slowVersion(summary), stubInfo); - domAttribute = WTF::makeUnique(*access.as().domAttribute()); - haveDOMAttribute = true; - result.m_state = Custom; - break; - } - default: { - // FIXME: It would be totally sweet to support more of these at some point in the - // future. https://bugs.webkit.org/show_bug.cgi?id=133052 + switch (access.type()) { + case AccessCase::CustomAccessorGetter: { + auto conditionSet = access.conditionSet(); + if (!conditionSet.isStillValid()) + continue; + + Structure* currStructure = access.hasAlternateBase() ? access.alternateBase()->structure() : access.structure(); + // For now, we only support cases where the JSGlobalObject is the same as the current profiledBlock's. + if (currStructure->globalObject() != profiledBlock->globalObject()) return GetByStatus(JSC::slowVersion(summary), stubInfo); - } } + + auto customAccessorGetter = access.as().customAccessor(); + std::unique_ptr domAttribute; + if (access.as().domAttribute()) + domAttribute = WTF::makeUnique(*access.as().domAttribute()); ASSERT((AccessCase::Miss == access.type() || access.isCustom()) == (access.offset() == invalidOffset)); - GetByVariant variant(access.identifier(), StructureSet(structure), complexGetStatus.offset(), - complexGetStatus.conditionSet(), WTFMove(callLinkStatus), - intrinsicFunction, + GetByVariant variant(access.identifier(), StructureSet(structure), invalidOffset, + WTFMove(conditionSet), nullptr, + nullptr, customAccessorGetter, WTFMove(domAttribute)); if (!result.appendVariant(variant)) return GetByStatus(JSC::slowVersion(summary), stubInfo); - if (haveDOMAttribute) { + if (domAttribute) { // Give up when custom accesses are not merged into one. 
if (result.numVariants() != 1) return GetByStatus(JSC::slowVersion(summary), stubInfo); + result.m_containsDOMGetter = true; } else { + if (result.m_containsDOMGetter) + return GetByStatus(JSC::slowVersion(summary), stubInfo); + } + result.m_state = CustomAccessor; + break; + } + default: { + ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(structure, access.conditionSet(), access.uid()); + switch (complexGetStatus.kind()) { + case ComplexGetStatus::ShouldSkip: + continue; + + case ComplexGetStatus::TakesSlowPath: + return GetByStatus(JSC::slowVersion(summary), stubInfo); + + case ComplexGetStatus::Inlineable: { + std::unique_ptr callLinkStatus; + JSFunction* intrinsicFunction = nullptr; + switch (access.type()) { + case AccessCase::Load: + case AccessCase::GetGetter: + case AccessCase::Miss: { + break; + } + case AccessCase::IntrinsicGetter: { + intrinsicFunction = access.as().intrinsicFunction(); + break; + } + case AccessCase::Getter: { + callLinkStatus = makeUnique(); + if (CallLinkInfo* callLinkInfo = access.as().callLinkInfo()) { + *callLinkStatus = CallLinkStatus::computeFor( + locker, profiledBlock, *callLinkInfo, callExitSiteData); + } + break; + } + default: { + // FIXME: It would be totally sweet to support more of these at some point in the + // future. https://bugs.webkit.org/show_bug.cgi?id=133052 + return GetByStatus(JSC::slowVersion(summary), stubInfo); + } + } + + ASSERT((AccessCase::Miss == access.type() || access.isCustom()) == (access.offset() == invalidOffset)); + GetByVariant variant(access.identifier(), StructureSet(structure), complexGetStatus.offset(), + complexGetStatus.conditionSet(), WTFMove(callLinkStatus), intrinsicFunction); + + if (!result.appendVariant(variant)) + return GetByStatus(JSC::slowVersion(summary), stubInfo); + // Give up when custom access and simple access are mixed. - if (result.m_state == Custom) + if (result.m_state == CustomAccessor) return GetByStatus(JSC::slowVersion(summary), stubInfo); + break; + } } break; - } } + } + } } result.shrinkToFit(); @@ -489,7 +509,7 @@ bool GetByStatus::makesCalls() const case NoInformation: case LikelyTakesSlowPath: case ObservedTakesSlowPath: - case Custom: + case CustomAccessor: case ModuleNamespace: return false; case Simple: @@ -535,7 +555,7 @@ void GetByStatus::merge(const GetByStatus& other) case Megamorphic: if (m_state != other.m_state) { - if (other.m_state == Simple || other.m_state == Custom) { + if (other.m_state == Simple || other.m_state == CustomAccessor) { *this = other; return; } @@ -544,7 +564,7 @@ void GetByStatus::merge(const GetByStatus& other) return; case Simple: - case Custom: + case CustomAccessor: case ProxyObject: if (m_state != other.m_state) return mergeSlow(); @@ -644,8 +664,8 @@ void GetByStatus::dump(PrintStream& out) const case Simple: out.print("Simple"); break; - case Custom: - out.print("Custom"); + case CustomAccessor: + out.print("CustomAccessor"); break; case Megamorphic: out.print("Megamorphic"); diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/GetByStatus.h b/vendor/webkit/Source/JavaScriptCore/bytecode/GetByStatus.h index 237d1ecc..cebc8245 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/GetByStatus.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/GetByStatus.h @@ -57,7 +57,7 @@ class GetByStatus final { // a possible structure chain and a possible specific value. Simple, // It's cached for a custom accessor with a possible structure chain. - Custom, + CustomAccessor, // It's cached for a megamorphic case. 
Megamorphic, // It's cached for an access to a module namespace object's binding. @@ -102,7 +102,7 @@ class GetByStatus final { bool isSet() const { return m_state != NoInformation; } explicit operator bool() const { return isSet(); } bool isSimple() const { return m_state == Simple; } - bool isCustom() const { return m_state == Custom; } + bool isCustomAccessor() const { return m_state == CustomAccessor; } bool isMegamorphic() const { return m_state == Megamorphic; } bool isModuleNamespace() const { return m_state == ModuleNamespace; } bool isProxyObject() const { return m_state == ProxyObject; } @@ -114,7 +114,7 @@ class GetByStatus final { bool takesSlowPath() const { - return m_state == LikelyTakesSlowPath || m_state == ObservedTakesSlowPath || m_state == MakesCalls || m_state == ObservedSlowPathAndMakesCalls || m_state == Custom || m_state == ModuleNamespace || m_state == Megamorphic; + return m_state == LikelyTakesSlowPath || m_state == ObservedTakesSlowPath || m_state == MakesCalls || m_state == ObservedSlowPathAndMakesCalls || m_state == CustomAccessor || m_state == ModuleNamespace || m_state == Megamorphic; } bool observedStructureStubInfoSlowPath() const { return m_state == ObservedTakesSlowPath || m_state == ObservedSlowPathAndMakesCalls; } bool makesCalls() const; @@ -162,7 +162,8 @@ class GetByStatus final { Vector m_variants; Box m_moduleNamespaceData; State m_state; - bool m_wasSeenInJIT { false }; + bool m_wasSeenInJIT : 1 { false }; + bool m_containsDOMGetter : 1 { false }; }; } // namespace JSC diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/GetByVariant.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/GetByVariant.cpp index 0966d01c..377f6944 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/GetByVariant.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/GetByVariant.cpp @@ -130,16 +130,15 @@ bool GetByVariant::attemptToMerge(const GetByVariant& other) if (m_conditionSet.isEmpty() != other.m_conditionSet.isEmpty()) return false; - ObjectPropertyConditionSet mergedConditionSet; if (!m_conditionSet.isEmpty()) { - mergedConditionSet = m_conditionSet.mergedWith(other.m_conditionSet); + auto mergedConditionSet = m_conditionSet.mergedWith(other.m_conditionSet); if (!mergedConditionSet.isValid()) return false; // If this is a hit variant, one slot base should exist. If this is not a hit variant, the slot base is not necessary. if (!isPropertyUnset() && !mergedConditionSet.hasOneSlotBaseCondition()) return false; + m_conditionSet = mergedConditionSet; } - m_conditionSet = mergedConditionSet; m_structureSet.merge(other.m_structureSet); diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/GetByVariant.h b/vendor/webkit/Source/JavaScriptCore/bytecode/GetByVariant.h index 3ed07f60..312945ea 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/GetByVariant.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/GetByVariant.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2014-2021 Apple Inc. All rights reserved. + * Copyright (C) 2014-2023 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,6 +31,7 @@ #include "PropertyOffset.h" #include "StructureSet.h" #include +#include namespace JSC { namespace DOMJIT { @@ -42,7 +43,7 @@ class GetByStatus; struct DumpContext; class GetByVariant { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(GetByVariant); public: GetByVariant( CacheableIdentifier, diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/GetterSetterAccessCase.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/GetterSetterAccessCase.cpp index 5b259d9c..6fa45f22 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/GetterSetterAccessCase.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/GetterSetterAccessCase.cpp @@ -32,10 +32,6 @@ namespace JSC { -namespace GetterSetterAccessCaseInternal { -static constexpr bool verbose = false; -} - GetterSetterAccessCase::GetterSetterAccessCase(VM& vm, JSCell* owner, AccessType accessType, CacheableIdentifier identifier, PropertyOffset offset, Structure* structure, const ObjectPropertyConditionSet& conditionSet, bool viaGlobalProxy, WatchpointSet* additionalSet, JSObject* customSlotBase, RefPtr&& prototypeAccessChain) : Base(vm, owner, accessType, identifier, offset, structure, conditionSet, viaGlobalProxy, additionalSet, WTFMove(prototypeAccessChain)) { diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/InByStatus.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/InByStatus.cpp index a72fd3d2..641fb384 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/InByStatus.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/InByStatus.cpp @@ -1,6 +1,6 @@ /* * Copyright (C) 2018 Yusuke Suzuki . - * Copyright (C) 2018-2021 Apple Inc. All rights reserved. + * Copyright (C) 2018-2023 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -34,9 +34,12 @@ #include "InlineCacheCompiler.h" #include "StructureStubInfo.h" #include +#include namespace JSC { +WTF_MAKE_TZONE_ALLOCATED_IMPL(InByStatus); + bool InByStatus::appendVariant(const InByVariant& variant) { return appendICStatusVariant(m_variants, variant); @@ -48,14 +51,14 @@ void InByStatus::shrinkToFit() } #if ENABLE(JIT) -InByStatus InByStatus::computeFor(CodeBlock* profiledBlock, ICStatusMap& map, BytecodeIndex bytecodeIndex, ExitFlag didExit) +InByStatus InByStatus::computeFor(CodeBlock* profiledBlock, ICStatusMap& map, BytecodeIndex bytecodeIndex, ExitFlag didExit, CodeOrigin codeOrigin) { ConcurrentJSLocker locker(profiledBlock->m_lock); InByStatus result; #if ENABLE(DFG_JIT) - result = computeForStubInfoWithoutExitSiteFeedback(locker, profiledBlock->vm(), map.get(CodeOrigin(bytecodeIndex)).stubInfo); + result = computeForStubInfoWithoutExitSiteFeedback(locker, profiledBlock->vm(), map.get(CodeOrigin(bytecodeIndex)).stubInfo, codeOrigin); if (!result.takesSlowPath() && didExit) return InByStatus(TakesSlowPath); @@ -68,9 +71,9 @@ InByStatus InByStatus::computeFor(CodeBlock* profiledBlock, ICStatusMap& map, By return result; } -InByStatus InByStatus::computeFor(CodeBlock* profiledBlock, ICStatusMap& map, BytecodeIndex bytecodeIndex) +InByStatus InByStatus::computeFor(CodeBlock* profiledBlock, ICStatusMap& map, BytecodeIndex bytecodeIndex, CodeOrigin codeOrigin) { - return computeFor(profiledBlock, map, bytecodeIndex, hasBadCacheExitSite(profiledBlock, bytecodeIndex)); + return computeFor(profiledBlock, map, bytecodeIndex, hasBadCacheExitSite(profiledBlock, bytecodeIndex), codeOrigin); } InByStatus InByStatus::computeFor( @@ -85,8 +88,7 @@ InByStatus InByStatus::computeFor( auto bless = [&] (const InByStatus& result) -> InByStatus { if (!context->isInlined(codeOrigin)) { - InByStatus baselineResult = computeFor( - profiledBlock, baselineMap, bytecodeIndex, didExit); + InByStatus baselineResult = computeFor(profiledBlock, baselineMap, bytecodeIndex, didExit, codeOrigin); baselineResult.merge(result); return baselineResult; } @@ -100,7 +102,7 @@ InByStatus InByStatus::computeFor( InByStatus result; { ConcurrentJSLocker locker(context->optimizedCodeBlock->m_lock); - result = computeForStubInfoWithoutExitSiteFeedback(locker, profiledBlock->vm(), status.stubInfo); + result = computeForStubInfoWithoutExitSiteFeedback(locker, profiledBlock->vm(), status.stubInfo, codeOrigin); } if (result.isSet()) return bless(result); @@ -111,21 +113,21 @@ InByStatus InByStatus::computeFor( return bless(*status.inStatus); } - return computeFor(profiledBlock, baselineMap, bytecodeIndex, didExit); + return computeFor(profiledBlock, baselineMap, bytecodeIndex, didExit, codeOrigin); } #endif // ENABLE(JIT) #if ENABLE(DFG_JIT) InByStatus InByStatus::computeForStubInfo(const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, CodeOrigin codeOrigin) { - InByStatus result = InByStatus::computeForStubInfoWithoutExitSiteFeedback(locker, profiledBlock->vm(), stubInfo); + InByStatus result = InByStatus::computeForStubInfoWithoutExitSiteFeedback(locker, profiledBlock->vm(), stubInfo, codeOrigin); if (!result.takesSlowPath() && hasBadCacheExitSite(profiledBlock, codeOrigin.bytecodeIndex())) return InByStatus(TakesSlowPath); return result; } -InByStatus InByStatus::computeForStubInfoWithoutExitSiteFeedback(const ConcurrentJSLocker&, VM& 
vm, StructureStubInfo* stubInfo)
+InByStatus InByStatus::computeForStubInfoWithoutExitSiteFeedback(const ConcurrentJSLocker&, VM& vm, StructureStubInfo* stubInfo, CodeOrigin codeOrigin)
 {
     StubInfoSummary summary = StructureStubInfo::summary(vm, stubInfo);
     if (!isInlineable(summary))
@@ -161,6 +163,28 @@ InByStatus InByStatus::computeForStubInfoWithoutExitSiteFeedback(const Concurren
     case CacheType::Stub: {
         PolymorphicAccess* list = stubInfo->m_stub.get();
+        if (list->size() == 1) {
+            const AccessCase& access = list->at(0);
+            switch (access.type()) {
+            case AccessCase::InMegamorphic:
+            case AccessCase::IndexedMegamorphicIn: {
+                // Emitting InMegamorphic means giving up on polymorphic IC optimization, so it needs very careful handling.
+                // It is possible that a function gets inlined into another function and then sees only a limited number of
+                // structures; in that case, continuing to use the IC is better than falling back to the megamorphic path.
+                // But if the function was compiled before and even the optimizing JIT saw the megamorphism, it is likely that
+                // the function keeps behaving megamorphically, and inlined megamorphic code is faster. Currently, we use
+                // InMegamorphic only when the exact same form of CodeOrigin saw this megamorphic in-access before (same level
+                // of inlining etc.). This is very conservative but effective: an IC is very fast when it works well, but costly
+                // when it does not and goes megamorphic. Once this cost-benefit tradeoff changes (via handler IC), we can revisit this condition.
+                if (isSameStyledCodeOrigin(stubInfo->codeOrigin, codeOrigin) && !stubInfo->tookSlowPath)
+                    return InByStatus(Megamorphic);
+                break;
+            }
+            default:
+                break;
+            }
+        }
+
         for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
             const AccessCase& access = list->at(listIndex);
             if (access.viaGlobalProxy())
@@ -169,6 +193,13 @@ InByStatus InByStatus::computeForStubInfoWithoutExitSiteFeedback(const Concurren
             if (access.usesPolyProto())
                 return InByStatus(TakesSlowPath);
 
+            if (!access.requiresIdentifierNameMatch()) {
+                // FIXME: We could use this for indexed loads in the future. This is pretty solid profiling
+                // information, and probably better than ArrayProfile when it's available.
+                // https://bugs.webkit.org/show_bug.cgi?id=204215
+                return InByStatus(TakesSlowPath);
+            }
+
             Structure* structure = access.structure();
             if (!structure) {
                 // The null structure cases arise due to array.length. We have no way of creating a
@@ -230,7 +261,18 @@ void InByStatus::merge(const InByStatus& other)
     case NoInformation:
         *this = other;
         return;
-
+
+    case Megamorphic:
+        if (m_state != other.m_state) {
+            if (other.m_state == Simple) {
+                *this = other;
+                return;
+            }
+            *this = InByStatus(TakesSlowPath);
+            return;
+        }
+        return;
+
     case Simple:
         if (other.m_state != Simple) {
             *this = InByStatus(TakesSlowPath);
@@ -304,6 +346,9 @@ void InByStatus::dump(PrintStream& out) const
     case Simple:
         out.print("Simple");
         break;
+    case Megamorphic:
+        out.print("Megamorphic");
+        break;
     case TakesSlowPath:
         out.print("TakesSlowPath");
         break;
diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/InByStatus.h b/vendor/webkit/Source/JavaScriptCore/bytecode/InByStatus.h
index 2c03f4fb..750456f6 100644
--- a/vendor/webkit/Source/JavaScriptCore/bytecode/InByStatus.h
+++ b/vendor/webkit/Source/JavaScriptCore/bytecode/InByStatus.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2018 Yusuke Suzuki .
- * Copyright (C) 2018-2021 Apple Inc. All rights reserved.
+ * Copyright (C) 2018-2023 Apple Inc.
All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -32,6 +32,7 @@ #include "ICStatusMap.h" #include "InByVariant.h" #include "StubInfoSummary.h" +#include namespace JSC { @@ -40,7 +41,7 @@ class CodeBlock; class StructureStubInfo; class InByStatus final { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(InByStatus); public: enum State { // It's uncached so we have no information. @@ -48,6 +49,8 @@ class InByStatus final { // It's cached for a simple access to a known object property with // a possible structure chain and a possible specific value. Simple, + // It's cached for a megamorphic case. + Megamorphic, // It's known to often take slow path. TakesSlowPath, }; @@ -79,8 +82,8 @@ class InByStatus final { RELEASE_ASSERT_NOT_REACHED(); } - static InByStatus computeFor(CodeBlock*, ICStatusMap&, BytecodeIndex); - static InByStatus computeFor(CodeBlock*, ICStatusMap&, BytecodeIndex, ExitFlag); + static InByStatus computeFor(CodeBlock*, ICStatusMap&, BytecodeIndex, CodeOrigin); + static InByStatus computeFor(CodeBlock*, ICStatusMap&, BytecodeIndex, ExitFlag, CodeOrigin); static InByStatus computeFor(CodeBlock* baselineBlock, ICStatusMap& baselineMap, ICStatusContextStack&, CodeOrigin); #if ENABLE(DFG_JIT) @@ -92,13 +95,14 @@ class InByStatus final { bool isSet() const { return m_state != NoInformation; } explicit operator bool() const { return isSet(); } bool isSimple() const { return m_state == Simple; } + bool isMegamorphic() const { return m_state == Megamorphic; } size_t numVariants() const { return m_variants.size(); } const Vector& variants() const { return m_variants; } const InByVariant& at(size_t index) const { return m_variants[index]; } const InByVariant& operator[](size_t index) const { return at(index); } - bool takesSlowPath() const { return m_state == TakesSlowPath; } + bool takesSlowPath() const { return m_state == Megamorphic || m_state == TakesSlowPath; } void merge(const InByStatus&); @@ -115,7 +119,7 @@ class InByStatus final { private: #if ENABLE(DFG_JIT) - static InByStatus computeForStubInfoWithoutExitSiteFeedback(const ConcurrentJSLocker&, VM&, StructureStubInfo*); + static InByStatus computeForStubInfoWithoutExitSiteFeedback(const ConcurrentJSLocker&, VM&, StructureStubInfo*, CodeOrigin); #endif bool appendVariant(const InByVariant&); void shrinkToFit(); diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/InByVariant.h b/vendor/webkit/Source/JavaScriptCore/bytecode/InByVariant.h index 55556cee..e019eb7b 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/InByVariant.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/InByVariant.h @@ -1,6 +1,6 @@ /* * Copyright (C) 2018 Yusuke Suzuki . - * Copyright (C) 2018-2021 Apple Inc. All rights reserved. + * Copyright (C) 2018-2023 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,6 +30,7 @@ #include "ObjectPropertyConditionSet.h" #include "PropertyOffset.h" #include "StructureSet.h" +#include namespace JSC { namespace DOMJIT { @@ -40,7 +41,7 @@ class InByStatus; struct DumpContext; class InByVariant { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(InByVariant); public: InByVariant(CacheableIdentifier, const StructureSet& = StructureSet(), PropertyOffset = invalidOffset, const ObjectPropertyConditionSet& = ObjectPropertyConditionSet()); diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/InlineAccess.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/InlineAccess.cpp index 56556795..579c03f2 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/InlineAccess.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/InlineAccess.cpp @@ -149,15 +149,13 @@ void InlineAccess::dumpCacheSizesAndCrash() } -template -ALWAYS_INLINE static bool linkCodeInline(const char* name, CCallHelpers& jit, StructureStubInfo& stubInfo, const Function& function) +ALWAYS_INLINE static bool linkCodeInline(const char* name, CCallHelpers& jit, StructureStubInfo& stubInfo) { if (jit.m_assembler.buffer().codeSize() <= stubInfo.inlineCodeSize()) { bool needsBranchCompaction = true; LinkBuffer linkBuffer(jit, stubInfo.startLocation, stubInfo.inlineCodeSize(), LinkBuffer::Profile::InlineCache, JITCompilationMustSucceed, needsBranchCompaction); ASSERT(linkBuffer.isValid()); - function(linkBuffer); - FINALIZE_CODE(linkBuffer, NoPtrTag, "InlineAccessType: '%s'", name); + FINALIZE_CODE(linkBuffer, NoPtrTag, ASCIILiteral::fromLiteralUnsafe(name), "InlineAccessType: '%s'", name); return true; } @@ -191,10 +189,10 @@ bool InlineAccess::generateSelfPropertyAccess(CodeBlock* codeBlock, StructureStu GPRReg base = stubInfo.m_baseGPR; JSValueRegs value = stubInfo.valueRegs(); - auto branchToSlowPath = jit.patchableBranch32( + jit.patchableBranch32( MacroAssembler::NotEqual, MacroAssembler::Address(base, JSCell::structureIDOffset()), - MacroAssembler::TrustedImm32(bitwise_cast(structure->id()))); + MacroAssembler::TrustedImm32(bitwise_cast(structure->id()))).linkThunk(stubInfo.slowPathStartLocation, &jit); GPRReg storage; if (isInlineOffset(offset)) storage = base; @@ -206,10 +204,7 @@ bool InlineAccess::generateSelfPropertyAccess(CodeBlock* codeBlock, StructureStu jit.loadValue( MacroAssembler::Address(storage, offsetRelativeToBase(offset)), value); - bool linkedCodeInline = linkCodeInline("property access", jit, stubInfo, [&] (LinkBuffer& linkBuffer) { - linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation); - }); - return linkedCodeInline; + return linkCodeInline("property access", jit, stubInfo); } ALWAYS_INLINE static GPRReg getScratchRegister(StructureStubInfo& stubInfo) @@ -269,10 +264,10 @@ bool InlineAccess::generateSelfPropertyReplace(CodeBlock* codeBlock, StructureSt GPRReg base = stubInfo.m_baseGPR; JSValueRegs value = stubInfo.valueRegs(); - auto branchToSlowPath = jit.patchableBranch32( + jit.patchableBranch32( MacroAssembler::NotEqual, MacroAssembler::Address(base, JSCell::structureIDOffset()), - MacroAssembler::TrustedImm32(bitwise_cast(structure->id()))); + MacroAssembler::TrustedImm32(bitwise_cast(structure->id()))).linkThunk(stubInfo.slowPathStartLocation, &jit); GPRReg storage; if (isInlineOffset(offset)) @@ -286,10 +281,7 @@ bool InlineAccess::generateSelfPropertyReplace(CodeBlock* codeBlock, StructureSt jit.storeValue( 
value, MacroAssembler::Address(storage, offsetRelativeToBase(offset))); - bool linkedCodeInline = linkCodeInline("property replace", jit, stubInfo, [&] (LinkBuffer& linkBuffer) { - linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation); - }); - return linkedCodeInline; + return linkCodeInline("property replace", jit, stubInfo); } bool InlineAccess::isCacheableArrayLength(CodeBlock* codeBlock, StructureStubInfo& stubInfo, JSArray* array) @@ -324,16 +316,13 @@ bool InlineAccess::generateArrayLength(CodeBlock* codeBlock, StructureStubInfo& jit.load8(CCallHelpers::Address(base, JSCell::indexingTypeAndMiscOffset()), scratch); jit.and32(CCallHelpers::TrustedImm32(IndexingTypeMask), scratch); - auto branchToSlowPath = jit.patchableBranch32( - CCallHelpers::NotEqual, scratch, CCallHelpers::TrustedImm32(array->indexingType())); + jit.patchableBranch32( + CCallHelpers::NotEqual, scratch, CCallHelpers::TrustedImm32(array->indexingType())).linkThunk(stubInfo.slowPathStartLocation, &jit); jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), value.payloadGPR()); jit.load32(CCallHelpers::Address(value.payloadGPR(), ArrayStorage::lengthOffset()), value.payloadGPR()); jit.boxInt32(value.payloadGPR(), value); - bool linkedCodeInline = linkCodeInline("array length", jit, stubInfo, [&] (LinkBuffer& linkBuffer) { - linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation); - }); - return linkedCodeInline; + return linkCodeInline("array length", jit, stubInfo); } bool InlineAccess::isCacheableStringLength(CodeBlock* codeBlock, StructureStubInfo& stubInfo) @@ -361,10 +350,10 @@ bool InlineAccess::generateStringLength(CodeBlock* codeBlock, StructureStubInfo& JSValueRegs value = stubInfo.valueRegs(); GPRReg scratch = getScratchRegister(stubInfo); - auto branchToSlowPath = jit.patchableBranch8( + jit.patchableBranch8( CCallHelpers::NotEqual, CCallHelpers::Address(base, JSCell::typeInfoTypeOffset()), - CCallHelpers::TrustedImm32(StringType)); + CCallHelpers::TrustedImm32(StringType)).linkThunk(stubInfo.slowPathStartLocation, &jit); jit.loadPtr(CCallHelpers::Address(base, JSString::offsetOfValue()), scratch); auto isRope = jit.branchIfRopeStringImpl(scratch); @@ -377,10 +366,7 @@ bool InlineAccess::generateStringLength(CodeBlock* codeBlock, StructureStubInfo& done.link(&jit); jit.boxInt32(value.payloadGPR(), value); - bool linkedCodeInline = linkCodeInline("string length", jit, stubInfo, [&] (LinkBuffer& linkBuffer) { - linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation); - }); - return linkedCodeInline; + return linkCodeInline("string length", jit, stubInfo); } @@ -399,72 +385,36 @@ bool InlineAccess::generateSelfInAccess(CodeBlock* codeBlock, StructureStubInfo& GPRReg base = stubInfo.m_baseGPR; JSValueRegs value = stubInfo.valueRegs(); - auto branchToSlowPath = jit.patchableBranch32( + jit.patchableBranch32( MacroAssembler::NotEqual, MacroAssembler::Address(base, JSCell::structureIDOffset()), - MacroAssembler::TrustedImm32(bitwise_cast(structure->id()))); + MacroAssembler::TrustedImm32(bitwise_cast(structure->id()))).linkThunk(stubInfo.slowPathStartLocation, &jit); jit.boxBoolean(true, value); - bool linkedCodeInline = linkCodeInline("in access", jit, stubInfo, [&] (LinkBuffer& linkBuffer) { - linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation); - }); - return linkedCodeInline; + return linkCodeInline("in access", jit, stubInfo); } -void InlineAccess::rewireStubAsJumpInAccessNotUsingInlineAccess(CodeBlock* codeBlock, StructureStubInfo& stubInfo, 
CodeLocationLabel target) +void InlineAccess::rewireStubAsJumpInAccess(CodeBlock* codeBlock, StructureStubInfo& stubInfo, InlineCacheHandler& handler) { + stubInfo.m_handler = &handler; if (codeBlock->useDataIC()) { - stubInfo.m_codePtr = target; - return; - } - - CCallHelpers::emitJITCodeOver(stubInfo.startLocation.retagged(), scopedLambda([&](CCallHelpers& jit) { - // We don't need a nop sled here because nobody should be jumping into the middle of an IC. - auto jump = jit.jump(); - jit.addLinkTask([=] (LinkBuffer& linkBuffer) { - linkBuffer.link(jump, target); - }); - }), "InlineAccess: linking constant jump"); -} - -void InlineAccess::rewireStubAsJumpInAccess(CodeBlock* codeBlock, StructureStubInfo& stubInfo, CodeLocationLabel target) -{ - if (codeBlock->useDataIC()) { - stubInfo.m_codePtr = target; + stubInfo.m_codePtr = handler.callTarget(); stubInfo.m_inlineAccessBaseStructureID.clear(); // Clear out the inline access code. return; } - CCallHelpers::emitJITCodeOver(stubInfo.startLocation.retagged(), scopedLambda([&](CCallHelpers& jit) { - // We don't need a nop sled here because nobody should be jumping into the middle of an IC. - auto jump = jit.jump(); - jit.addLinkTask([=] (LinkBuffer& linkBuffer) { - linkBuffer.link(jump, target); - }); - }), "InlineAccess: linking constant jump"); + CCallHelpers::replaceWithJump(stubInfo.startLocation.retagged(), CodeLocationLabel { handler.callTarget() }); } void InlineAccess::resetStubAsJumpInAccess(CodeBlock* codeBlock, StructureStubInfo& stubInfo) { - if (codeBlock->useDataIC()) { - stubInfo.m_codePtr = stubInfo.slowPathStartLocation; - stubInfo.m_inlineAccessBaseStructureID.clear(); // Clear out the inline access code. + if (JITCode::isBaselineCode(codeBlock->jitType()) && Options::useHandlerIC()) { + auto handler = InlineCacheCompiler::generateSlowPathHandler(codeBlock->vm(), stubInfo.accessType); + InlineAccess::rewireStubAsJumpInAccess(codeBlock, stubInfo, handler.get()); return; } - - CCallHelpers::emitJITCodeOver(stubInfo.startLocation.retagged(), scopedLambda([&](CCallHelpers& jit) { - // We don't need a nop sled here because nobody should be jumping into the middle of an IC. 
- auto jump = jit.jump(); - auto slowPathStartLocation = stubInfo.slowPathStartLocation; - jit.addLinkTask([=] (LinkBuffer& linkBuffer) { - linkBuffer.link(jump, slowPathStartLocation); - }); - }), "InlineAccess: linking constant jump"); -} - -void InlineAccess::resetStubAsJumpInAccessNotUsingInlineAccess(CodeBlock* codeBlock, StructureStubInfo& stubInfo) -{ - rewireStubAsJumpInAccessNotUsingInlineAccess(codeBlock, stubInfo, stubInfo.slowPathStartLocation); + auto handler = InlineCacheHandler::createNonHandlerSlowPath(stubInfo.slowPathStartLocation); + InlineAccess::rewireStubAsJumpInAccess(codeBlock, stubInfo, handler.get()); } } // namespace JSC diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/InlineAccess.h b/vendor/webkit/Source/JavaScriptCore/bytecode/InlineAccess.h index 83e187a8..f1e1faa7 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/InlineAccess.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/InlineAccess.h @@ -33,6 +33,7 @@ namespace JSC { class CodeBlock; +class InlineCacheHandler; class JSArray; class Structure; class StructureStubInfo; @@ -52,8 +53,6 @@ class InlineAccess { return 40; #elif CPU(ARM_THUMB2) return 48; -#elif CPU(MIPS) - return 72; #elif CPU(RISCV64) return 44; #else @@ -72,8 +71,6 @@ class InlineAccess { return 40; #elif CPU(ARM_THUMB2) return 48; -#elif CPU(MIPS) - return 72; #elif CPU(RISCV64) return 52; #else @@ -92,8 +89,6 @@ class InlineAccess { size_t size = 44; #elif CPU(ARM_THUMB2) size_t size = 30; -#elif CPU(MIPS) - size_t size = 56; #elif CPU(RISCV64) size_t size = 60; #else @@ -111,9 +106,7 @@ class InlineAccess { static bool generateSelfInAccess(CodeBlock*, StructureStubInfo&, Structure*); static bool generateStringLength(CodeBlock*, StructureStubInfo&); - static void rewireStubAsJumpInAccessNotUsingInlineAccess(CodeBlock*, StructureStubInfo&, CodeLocationLabel); - static void rewireStubAsJumpInAccess(CodeBlock*, StructureStubInfo&, CodeLocationLabel); - static void resetStubAsJumpInAccessNotUsingInlineAccess(CodeBlock*, StructureStubInfo&); + static void rewireStubAsJumpInAccess(CodeBlock*, StructureStubInfo&, InlineCacheHandler&); static void resetStubAsJumpInAccess(CodeBlock*, StructureStubInfo&); // This is helpful when determining the size of an IC on diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/InlineCacheCompiler.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/InlineCacheCompiler.cpp index d574853b..da97d769 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/InlineCacheCompiler.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/InlineCacheCompiler.cpp @@ -29,6 +29,7 @@ #if ENABLE(JIT) #include "AccessCaseSnippetParams.h" +#include "BaselineJITCode.h" #include "BinarySwitch.h" #include "CCallHelpers.h" #include "CacheableIdentifierInlines.h" @@ -42,13 +43,16 @@ #include "Heap.h" #include "InstanceOfAccessCase.h" #include "IntrinsicGetterAccessCase.h" +#include "JIT.h" #include "JITOperations.h" +#include "JITThunks.h" #include "JSModuleEnvironment.h" #include "JSModuleNamespaceObject.h" #include "JSTypedArrays.h" #include "JSWebAssemblyInstance.h" #include "LLIntThunks.h" #include "LinkBuffer.h" +#include "MaxFrameExtentForSlowPathCall.h" #include "MegamorphicCache.h" #include "ModuleNamespaceAccessCase.h" #include "ProxyObjectAccessCase.h" @@ -74,8 +78,14 @@ DEFINE_ALLOCATOR_WITH_HEAP_IDENTIFIER(PolymorphicAccess); void AccessGenerationResult::dump(PrintStream& out) const { out.print(m_kind); - if (m_code) - out.print(":", m_code); + if (m_handler) + out.print(":", *m_handler); +} + +void 
InlineCacheHandler::dump(PrintStream& out) const +{ + if (m_callTarget) + out.print(m_callTarget); } static TypedArrayType toTypedArrayType(AccessCase::AccessType accessType) @@ -83,48 +93,66 @@ static TypedArrayType toTypedArrayType(AccessCase::AccessType accessType) switch (accessType) { case AccessCase::IndexedTypedArrayInt8Load: case AccessCase::IndexedTypedArrayInt8Store: + case AccessCase::IndexedTypedArrayInt8InHit: case AccessCase::IndexedResizableTypedArrayInt8Load: case AccessCase::IndexedResizableTypedArrayInt8Store: + case AccessCase::IndexedResizableTypedArrayInt8InHit: return TypeInt8; case AccessCase::IndexedTypedArrayUint8Load: case AccessCase::IndexedTypedArrayUint8Store: + case AccessCase::IndexedTypedArrayUint8InHit: case AccessCase::IndexedResizableTypedArrayUint8Load: case AccessCase::IndexedResizableTypedArrayUint8Store: + case AccessCase::IndexedResizableTypedArrayUint8InHit: return TypeUint8; case AccessCase::IndexedTypedArrayUint8ClampedLoad: case AccessCase::IndexedTypedArrayUint8ClampedStore: + case AccessCase::IndexedTypedArrayUint8ClampedInHit: case AccessCase::IndexedResizableTypedArrayUint8ClampedLoad: case AccessCase::IndexedResizableTypedArrayUint8ClampedStore: + case AccessCase::IndexedResizableTypedArrayUint8ClampedInHit: return TypeUint8Clamped; case AccessCase::IndexedTypedArrayInt16Load: case AccessCase::IndexedTypedArrayInt16Store: + case AccessCase::IndexedTypedArrayInt16InHit: case AccessCase::IndexedResizableTypedArrayInt16Load: case AccessCase::IndexedResizableTypedArrayInt16Store: + case AccessCase::IndexedResizableTypedArrayInt16InHit: return TypeInt16; case AccessCase::IndexedTypedArrayUint16Load: case AccessCase::IndexedTypedArrayUint16Store: + case AccessCase::IndexedTypedArrayUint16InHit: case AccessCase::IndexedResizableTypedArrayUint16Load: case AccessCase::IndexedResizableTypedArrayUint16Store: + case AccessCase::IndexedResizableTypedArrayUint16InHit: return TypeUint16; case AccessCase::IndexedTypedArrayInt32Load: case AccessCase::IndexedTypedArrayInt32Store: + case AccessCase::IndexedTypedArrayInt32InHit: case AccessCase::IndexedResizableTypedArrayInt32Load: case AccessCase::IndexedResizableTypedArrayInt32Store: + case AccessCase::IndexedResizableTypedArrayInt32InHit: return TypeInt32; case AccessCase::IndexedTypedArrayUint32Load: case AccessCase::IndexedTypedArrayUint32Store: + case AccessCase::IndexedTypedArrayUint32InHit: case AccessCase::IndexedResizableTypedArrayUint32Load: case AccessCase::IndexedResizableTypedArrayUint32Store: + case AccessCase::IndexedResizableTypedArrayUint32InHit: return TypeUint32; case AccessCase::IndexedTypedArrayFloat32Load: case AccessCase::IndexedTypedArrayFloat32Store: + case AccessCase::IndexedTypedArrayFloat32InHit: case AccessCase::IndexedResizableTypedArrayFloat32Load: case AccessCase::IndexedResizableTypedArrayFloat32Store: + case AccessCase::IndexedResizableTypedArrayFloat32InHit: return TypeFloat32; case AccessCase::IndexedTypedArrayFloat64Load: case AccessCase::IndexedTypedArrayFloat64Store: + case AccessCase::IndexedTypedArrayFloat64InHit: case AccessCase::IndexedResizableTypedArrayFloat64Load: case AccessCase::IndexedResizableTypedArrayFloat64Store: + case AccessCase::IndexedResizableTypedArrayFloat64InHit: return TypeFloat64; default: RELEASE_ASSERT_NOT_REACHED(); @@ -152,12 +180,263 @@ static bool forResizableTypedArray(AccessCase::AccessType accessType) case AccessCase::IndexedResizableTypedArrayUint32Store: case AccessCase::IndexedResizableTypedArrayFloat32Store: case 
AccessCase::IndexedResizableTypedArrayFloat64Store: + case AccessCase::IndexedResizableTypedArrayInt8InHit: + case AccessCase::IndexedResizableTypedArrayUint8InHit: + case AccessCase::IndexedResizableTypedArrayUint8ClampedInHit: + case AccessCase::IndexedResizableTypedArrayInt16InHit: + case AccessCase::IndexedResizableTypedArrayUint16InHit: + case AccessCase::IndexedResizableTypedArrayInt32InHit: + case AccessCase::IndexedResizableTypedArrayUint32InHit: + case AccessCase::IndexedResizableTypedArrayFloat32InHit: + case AccessCase::IndexedResizableTypedArrayFloat64InHit: return true; default: return false; } } +static bool needsScratchFPR(AccessCase::AccessType type) +{ + switch (type) { + case AccessCase::Load: + case AccessCase::LoadMegamorphic: + case AccessCase::StoreMegamorphic: + case AccessCase::InMegamorphic: + case AccessCase::Transition: + case AccessCase::Delete: + case AccessCase::DeleteNonConfigurable: + case AccessCase::DeleteMiss: + case AccessCase::Replace: + case AccessCase::Miss: + case AccessCase::GetGetter: + case AccessCase::Getter: + case AccessCase::Setter: + case AccessCase::CustomValueGetter: + case AccessCase::CustomAccessorGetter: + case AccessCase::CustomValueSetter: + case AccessCase::CustomAccessorSetter: + case AccessCase::InHit: + case AccessCase::InMiss: + case AccessCase::CheckPrivateBrand: + case AccessCase::SetPrivateBrand: + case AccessCase::ArrayLength: + case AccessCase::StringLength: + case AccessCase::DirectArgumentsLength: + case AccessCase::ScopedArgumentsLength: + case AccessCase::ModuleNamespaceLoad: + case AccessCase::ProxyObjectHas: + case AccessCase::ProxyObjectLoad: + case AccessCase::ProxyObjectStore: + case AccessCase::InstanceOfHit: + case AccessCase::InstanceOfMiss: + case AccessCase::InstanceOfGeneric: + case AccessCase::IndexedProxyObjectLoad: + case AccessCase::IndexedMegamorphicLoad: + case AccessCase::IndexedMegamorphicStore: + case AccessCase::IndexedMegamorphicIn: + case AccessCase::IndexedInt32Load: + case AccessCase::IndexedContiguousLoad: + case AccessCase::IndexedArrayStorageLoad: + case AccessCase::IndexedScopedArgumentsLoad: + case AccessCase::IndexedDirectArgumentsLoad: + case AccessCase::IndexedTypedArrayInt8Load: + case AccessCase::IndexedTypedArrayUint8Load: + case AccessCase::IndexedTypedArrayUint8ClampedLoad: + case AccessCase::IndexedTypedArrayInt16Load: + case AccessCase::IndexedTypedArrayUint16Load: + case AccessCase::IndexedTypedArrayInt32Load: + case AccessCase::IndexedResizableTypedArrayInt8Load: + case AccessCase::IndexedResizableTypedArrayUint8Load: + case AccessCase::IndexedResizableTypedArrayUint8ClampedLoad: + case AccessCase::IndexedResizableTypedArrayInt16Load: + case AccessCase::IndexedResizableTypedArrayUint16Load: + case AccessCase::IndexedResizableTypedArrayInt32Load: + case AccessCase::IndexedStringLoad: + case AccessCase::IndexedNoIndexingMiss: + case AccessCase::IndexedInt32Store: + case AccessCase::IndexedContiguousStore: + case AccessCase::IndexedArrayStorageStore: + case AccessCase::IndexedTypedArrayInt8Store: + case AccessCase::IndexedTypedArrayUint8Store: + case AccessCase::IndexedTypedArrayUint8ClampedStore: + case AccessCase::IndexedTypedArrayInt16Store: + case AccessCase::IndexedTypedArrayUint16Store: + case AccessCase::IndexedTypedArrayInt32Store: + case AccessCase::IndexedResizableTypedArrayInt8Store: + case AccessCase::IndexedResizableTypedArrayUint8Store: + case AccessCase::IndexedResizableTypedArrayUint8ClampedStore: + case AccessCase::IndexedResizableTypedArrayInt16Store: + case 
AccessCase::IndexedResizableTypedArrayUint16Store: + case AccessCase::IndexedResizableTypedArrayInt32Store: + case AccessCase::IndexedInt32InHit: + case AccessCase::IndexedContiguousInHit: + case AccessCase::IndexedArrayStorageInHit: + case AccessCase::IndexedScopedArgumentsInHit: + case AccessCase::IndexedDirectArgumentsInHit: + case AccessCase::IndexedTypedArrayInt8InHit: + case AccessCase::IndexedTypedArrayUint8InHit: + case AccessCase::IndexedTypedArrayUint8ClampedInHit: + case AccessCase::IndexedTypedArrayInt16InHit: + case AccessCase::IndexedTypedArrayUint16InHit: + case AccessCase::IndexedTypedArrayInt32InHit: + case AccessCase::IndexedResizableTypedArrayInt8InHit: + case AccessCase::IndexedResizableTypedArrayUint8InHit: + case AccessCase::IndexedResizableTypedArrayUint8ClampedInHit: + case AccessCase::IndexedResizableTypedArrayInt16InHit: + case AccessCase::IndexedResizableTypedArrayUint16InHit: + case AccessCase::IndexedResizableTypedArrayInt32InHit: + case AccessCase::IndexedStringInHit: + case AccessCase::IndexedNoIndexingInMiss: + // Indexed TypedArray InHit does not need FPR since it does not load a value. + case AccessCase::IndexedTypedArrayUint32InHit: + case AccessCase::IndexedTypedArrayFloat32InHit: + case AccessCase::IndexedTypedArrayFloat64InHit: + case AccessCase::IndexedResizableTypedArrayUint32InHit: + case AccessCase::IndexedResizableTypedArrayFloat32InHit: + case AccessCase::IndexedResizableTypedArrayFloat64InHit: + return false; + case AccessCase::IndexedDoubleLoad: + case AccessCase::IndexedTypedArrayFloat32Load: + case AccessCase::IndexedTypedArrayFloat64Load: + case AccessCase::IndexedTypedArrayUint32Load: + case AccessCase::IndexedResizableTypedArrayFloat32Load: + case AccessCase::IndexedResizableTypedArrayFloat64Load: + case AccessCase::IndexedResizableTypedArrayUint32Load: + case AccessCase::IndexedDoubleStore: + case AccessCase::IndexedTypedArrayUint32Store: + case AccessCase::IndexedTypedArrayFloat32Store: + case AccessCase::IndexedTypedArrayFloat64Store: + case AccessCase::IndexedResizableTypedArrayUint32Store: + case AccessCase::IndexedResizableTypedArrayFloat32Store: + case AccessCase::IndexedResizableTypedArrayFloat64Store: + case AccessCase::IndexedDoubleInHit: + // Used by TypedArrayLength/TypedArrayByteOffset in the process of boxing their result as a double + case AccessCase::IntrinsicGetter: + return true; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +static bool forInBy(AccessCase::AccessType type) +{ + switch (type) { + case AccessCase::Load: + case AccessCase::LoadMegamorphic: + case AccessCase::StoreMegamorphic: + case AccessCase::Transition: + case AccessCase::Delete: + case AccessCase::DeleteNonConfigurable: + case AccessCase::DeleteMiss: + case AccessCase::Replace: + case AccessCase::Miss: + case AccessCase::GetGetter: + case AccessCase::ArrayLength: + case AccessCase::StringLength: + case AccessCase::DirectArgumentsLength: + case AccessCase::ScopedArgumentsLength: + case AccessCase::CheckPrivateBrand: + case AccessCase::SetPrivateBrand: + case AccessCase::IndexedMegamorphicLoad: + case AccessCase::IndexedMegamorphicStore: + case AccessCase::IndexedInt32Load: + case AccessCase::IndexedDoubleLoad: + case AccessCase::IndexedContiguousLoad: + case AccessCase::IndexedArrayStorageLoad: + case AccessCase::IndexedScopedArgumentsLoad: + case AccessCase::IndexedDirectArgumentsLoad: + case AccessCase::IndexedTypedArrayInt8Load: + case AccessCase::IndexedTypedArrayUint8Load: + case AccessCase::IndexedTypedArrayUint8ClampedLoad: + case 
AccessCase::IndexedTypedArrayInt16Load: + case AccessCase::IndexedTypedArrayUint16Load: + case AccessCase::IndexedTypedArrayInt32Load: + case AccessCase::IndexedTypedArrayUint32Load: + case AccessCase::IndexedTypedArrayFloat32Load: + case AccessCase::IndexedTypedArrayFloat64Load: + case AccessCase::IndexedResizableTypedArrayInt8Load: + case AccessCase::IndexedResizableTypedArrayUint8Load: + case AccessCase::IndexedResizableTypedArrayUint8ClampedLoad: + case AccessCase::IndexedResizableTypedArrayInt16Load: + case AccessCase::IndexedResizableTypedArrayUint16Load: + case AccessCase::IndexedResizableTypedArrayInt32Load: + case AccessCase::IndexedResizableTypedArrayUint32Load: + case AccessCase::IndexedResizableTypedArrayFloat32Load: + case AccessCase::IndexedResizableTypedArrayFloat64Load: + case AccessCase::IndexedInt32Store: + case AccessCase::IndexedDoubleStore: + case AccessCase::IndexedContiguousStore: + case AccessCase::IndexedArrayStorageStore: + case AccessCase::IndexedTypedArrayInt8Store: + case AccessCase::IndexedTypedArrayUint8Store: + case AccessCase::IndexedTypedArrayUint8ClampedStore: + case AccessCase::IndexedTypedArrayInt16Store: + case AccessCase::IndexedTypedArrayUint16Store: + case AccessCase::IndexedTypedArrayInt32Store: + case AccessCase::IndexedTypedArrayUint32Store: + case AccessCase::IndexedTypedArrayFloat32Store: + case AccessCase::IndexedTypedArrayFloat64Store: + case AccessCase::IndexedResizableTypedArrayInt8Store: + case AccessCase::IndexedResizableTypedArrayUint8Store: + case AccessCase::IndexedResizableTypedArrayUint8ClampedStore: + case AccessCase::IndexedResizableTypedArrayInt16Store: + case AccessCase::IndexedResizableTypedArrayUint16Store: + case AccessCase::IndexedResizableTypedArrayInt32Store: + case AccessCase::IndexedResizableTypedArrayUint32Store: + case AccessCase::IndexedResizableTypedArrayFloat32Store: + case AccessCase::IndexedResizableTypedArrayFloat64Store: + case AccessCase::IndexedStringLoad: + case AccessCase::IndexedNoIndexingMiss: + case AccessCase::InstanceOfGeneric: + case AccessCase::Getter: + case AccessCase::Setter: + case AccessCase::ProxyObjectHas: + case AccessCase::ProxyObjectLoad: + case AccessCase::ProxyObjectStore: + case AccessCase::IndexedProxyObjectLoad: + case AccessCase::CustomValueGetter: + case AccessCase::CustomAccessorGetter: + case AccessCase::CustomValueSetter: + case AccessCase::CustomAccessorSetter: + case AccessCase::IntrinsicGetter: + case AccessCase::ModuleNamespaceLoad: + case AccessCase::InstanceOfHit: + case AccessCase::InstanceOfMiss: + return false; + case AccessCase::InHit: + case AccessCase::InMiss: + case AccessCase::InMegamorphic: + case AccessCase::IndexedInt32InHit: + case AccessCase::IndexedDoubleInHit: + case AccessCase::IndexedContiguousInHit: + case AccessCase::IndexedArrayStorageInHit: + case AccessCase::IndexedScopedArgumentsInHit: + case AccessCase::IndexedDirectArgumentsInHit: + case AccessCase::IndexedTypedArrayInt8InHit: + case AccessCase::IndexedTypedArrayUint8InHit: + case AccessCase::IndexedTypedArrayUint8ClampedInHit: + case AccessCase::IndexedTypedArrayInt16InHit: + case AccessCase::IndexedTypedArrayUint16InHit: + case AccessCase::IndexedTypedArrayInt32InHit: + case AccessCase::IndexedTypedArrayUint32InHit: + case AccessCase::IndexedTypedArrayFloat32InHit: + case AccessCase::IndexedTypedArrayFloat64InHit: + case AccessCase::IndexedResizableTypedArrayInt8InHit: + case AccessCase::IndexedResizableTypedArrayUint8InHit: + case AccessCase::IndexedResizableTypedArrayUint8ClampedInHit: + case 
AccessCase::IndexedResizableTypedArrayInt16InHit: + case AccessCase::IndexedResizableTypedArrayUint16InHit: + case AccessCase::IndexedResizableTypedArrayInt32InHit: + case AccessCase::IndexedResizableTypedArrayUint32InHit: + case AccessCase::IndexedResizableTypedArrayFloat32InHit: + case AccessCase::IndexedResizableTypedArrayFloat64InHit: + case AccessCase::IndexedStringInHit: + case AccessCase::IndexedNoIndexingInMiss: + case AccessCase::IndexedMegamorphicIn: + return true; + } + return false; +} + void InlineCacheCompiler::installWatchpoint(CodeBlock* codeBlock, const ObjectPropertyCondition& condition) { WatchpointsOnStructureStubInfo::ensureReferenceAndInstallWatchpoint(m_watchpoints, codeBlock, m_stubInfo, condition); @@ -168,13 +447,24 @@ void InlineCacheCompiler::restoreScratch() m_allocator->restoreReusedRegistersByPopping(*m_jit, m_preservedReusedRegisterState); } +inline bool InlineCacheCompiler::useHandlerIC() const +{ + return JITCode::isBaselineCode(m_jitType) && Options::useHandlerIC(); +} + void InlineCacheCompiler::succeed() { restoreScratch(); - if (m_jit->codeBlock()->useDataIC()) + if (useHandlerIC()) { + emitDataICEpilogue(*m_jit); + m_jit->ret(); + return; + } + if (m_jit->codeBlock()->useDataIC()) { m_jit->farJump(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfDoneLocation()), JSInternalPtrTag); - else - m_success.append(m_jit->jump()); + return; + } + m_success.append(m_jit->jump()); } const ScalarRegisterSet& InlineCacheCompiler::liveRegistersForCall() @@ -207,7 +497,7 @@ const ScalarRegisterSet& InlineCacheCompiler::calculateLiveRegistersForCallAndEx m_liveRegistersToPreserveAtExceptionHandlingCallSite = m_jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(m_jit->codeBlock(), m_stubInfo->callSiteIndex).buildScalarRegisterSet(); m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0; if (m_needsToRestoreRegistersIfException) - RELEASE_ASSERT(JITCode::isOptimizingJIT(m_jit->codeBlock()->jitType())); + RELEASE_ASSERT(JSC::JITCode::isOptimizingJIT(m_jit->codeBlock()->jitType())); auto liveRegistersForCall = RegisterSetBuilder(m_liveRegistersToPreserveAtExceptionHandlingCallSite.toRegisterSet(), m_allocator->usedRegisters()); if (m_jit->codeBlock()->useDataIC()) @@ -337,24 +627,13 @@ void InlineCacheCompiler::emitExplicitExceptionHandler() // does here. I.e, set callFrameForCatch and copy callee saves. m_jit->storePtr(GPRInfo::callFrameRegister, m_vm.addressOfCallFrameForCatch()); - CCallHelpers::Jump jumpToOSRExitExceptionHandler = m_jit->jump(); - // We don't need to insert a new exception handler in the table // because we're doing a manual exception check here. i.e, we'll // never arrive here from genericUnwind(). 
         HandlerInfo originalHandler = originalExceptionHandler();
-        m_jit->addLinkTask(
-            [=] (LinkBuffer& linkBuffer) {
-                linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
-            });
-    } else {
-        CCallHelpers::Jump jumpToExceptionHandler = m_jit->jump();
-        VM* vm = &m_vm;
-        m_jit->addLinkTask(
-            [=] (LinkBuffer& linkBuffer) {
-                linkBuffer.link(jumpToExceptionHandler, CodeLocationLabel(vm->getCTIStub(handleExceptionGenerator).retaggedCode()));
-            });
-    }
+        m_jit->jumpThunk(originalHandler.nativeCode);
+    } else
+        m_jit->jumpThunk(CodeLocationLabel(m_vm.getCTIStub(CommonJITThunkID::HandleException).retaggedCode()));
 }
 
 ScratchRegisterAllocator InlineCacheCompiler::makeDefaultScratchAllocator(GPRReg extraToLock)
@@ -375,6 +654,509 @@ ScratchRegisterAllocator InlineCacheCompiler::makeDefaultScratchAllocator(GPRReg
     return allocator;
 }
 
+#if CPU(X86_64) && OS(WINDOWS)
+static constexpr size_t prologueSizeInBytesDataIC = 5;
+#elif CPU(X86_64)
+static constexpr size_t prologueSizeInBytesDataIC = 1;
+#elif CPU(ARM64E)
+static constexpr size_t prologueSizeInBytesDataIC = 8;
+#elif CPU(ARM64)
+static constexpr size_t prologueSizeInBytesDataIC = 4;
+#elif CPU(ARM_THUMB2)
+static constexpr size_t prologueSizeInBytesDataIC = 6;
+#elif CPU(RISCV64)
+static constexpr size_t prologueSizeInBytesDataIC = 12;
+#else
+#error "unsupported architecture"
+#endif
+
+void InlineCacheCompiler::emitDataICPrologue(CCallHelpers& jit)
+{
+    // An important difference from the normal emitPrologue is that a DataIC handler does not change callFrameRegister:
+    // callFrameRegister remains the original one of the calling JS function. This removes the need for complicated
+    // exception-unwinding handling, and it lets operations access the CallFrame* via callFrameRegister.
+#if ASSERT_ENABLED
+    size_t startOffset = jit.debugOffset();
+#endif
+
+#if CPU(X86_64) && OS(WINDOWS)
+    static_assert(maxFrameExtentForSlowPathCall);
+    jit.push(CCallHelpers::framePointerRegister);
+    jit.subPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
+#elif CPU(X86_64)
+    static_assert(!maxFrameExtentForSlowPathCall);
+    jit.push(CCallHelpers::framePointerRegister);
+#elif CPU(ARM64)
+    static_assert(!maxFrameExtentForSlowPathCall);
+    jit.tagReturnAddress();
+    jit.pushPair(CCallHelpers::framePointerRegister, CCallHelpers::linkRegister);
+#elif CPU(ARM_THUMB2)
+    static_assert(maxFrameExtentForSlowPathCall);
+    jit.pushPair(CCallHelpers::framePointerRegister, CCallHelpers::linkRegister);
+    jit.subPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
+#elif CPU(RISCV64)
+    static_assert(!maxFrameExtentForSlowPathCall);
+    jit.pushPair(CCallHelpers::framePointerRegister, CCallHelpers::linkRegister);
+#else
+#error "unsupported architecture"
+#endif
+
+#if ASSERT_ENABLED
+    ASSERT(prologueSizeInBytesDataIC == (jit.debugOffset() - startOffset));
+#endif
+}
+
+void InlineCacheCompiler::emitDataICEpilogue(CCallHelpers& jit)
+{
+    if constexpr (!!maxFrameExtentForSlowPathCall)
+        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
+    jit.emitFunctionEpilogueWithEmptyFrame();
+}
+
+static MacroAssemblerCodeRef getByIdSlowPathCodeGenerator(VM& vm)
+{
+    CCallHelpers jit;
+
+    using SlowOperation = decltype(operationGetByIdOptimize);
+
+    using BaselineJITRegisters::GetById::baseJSR;
+    using BaselineJITRegisters::GetById::globalObjectGPR;
+    using BaselineJITRegisters::GetById::stubInfoGPR;
+
InlineCacheCompiler::emitDataICPrologue(jit); + + // Call slow operation + jit.prepareCallOperation(vm); + jit.loadPtr(CCallHelpers::Address(GPRInfo::jitDataRegister, BaselineJITData::offsetOfGlobalObject()), globalObjectGPR); + jit.setupArguments(baseJSR, globalObjectGPR, stubInfoGPR); + static_assert(preferredArgumentGPR() == stubInfoGPR, "Needed for branch to slow operation via StubInfo"); + jit.call(CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), OperationPtrTag); + + jit.emitNonPatchableExceptionCheck(vm).linkThunk(CodeLocationLabel(vm.getCTIStub(CommonJITThunkID::HandleException).retaggedCode()), &jit); + + InlineCacheCompiler::emitDataICEpilogue(jit); + jit.ret(); + + // While sp is extended, it is OK. Jump target will adjust it. + LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::InlineCache); + return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "get_by_id_slow"_s, "DataIC get_by_id_slow"); +} + +static MacroAssemblerCodeRef getByIdWithThisSlowPathCodeGenerator(VM& vm) +{ + CCallHelpers jit; + + using SlowOperation = decltype(operationGetByIdWithThisOptimize); + + using BaselineJITRegisters::GetByIdWithThis::baseJSR; + using BaselineJITRegisters::GetByIdWithThis::thisJSR; + using BaselineJITRegisters::GetByIdWithThis::globalObjectGPR; + using BaselineJITRegisters::GetByIdWithThis::stubInfoGPR; + + InlineCacheCompiler::emitDataICPrologue(jit); + + // Call slow operation + jit.prepareCallOperation(vm); + jit.loadPtr(CCallHelpers::Address(GPRInfo::jitDataRegister, BaselineJITData::offsetOfGlobalObject()), globalObjectGPR); + jit.setupArguments(baseJSR, thisJSR, globalObjectGPR, stubInfoGPR); + static_assert(preferredArgumentGPR() == stubInfoGPR, "Needed for branch to slow operation via StubInfo"); + jit.call(CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), OperationPtrTag); + + jit.emitNonPatchableExceptionCheck(vm).linkThunk(CodeLocationLabel(vm.getCTIStub(CommonJITThunkID::HandleException).retaggedCode()), &jit); + + InlineCacheCompiler::emitDataICEpilogue(jit); + jit.ret(); + + // While sp is extended, it is OK. Jump target will adjust it. 
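
// [Editor's note] Every DataIC slow-path thunk in this patch follows the same skeleton; only the
// argument marshalling differs per access type. A minimal sketch of the shared shape, assuming the
// helpers behave exactly as used in this file (illustrative only, not part of the patch):
//
//     InlineCacheCompiler::emitDataICPrologue(jit);   // keep the caller's callFrameRegister; push fp (and lr)
//     jit.prepareCallOperation(vm);
//     jit.loadPtr(CCallHelpers::Address(GPRInfo::jitDataRegister,
//         BaselineJITData::offsetOfGlobalObject()), globalObjectGPR);
//     jit.setupArguments(/* the operands of this access type */);
//     jit.call(CCallHelpers::Address(stubInfoGPR,
//         StructureStubInfo::offsetOfSlowOperation()), OperationPtrTag);
//     jit.emitNonPatchableExceptionCheck(vm).linkThunk(/* HandleException thunk */, &jit);
//     InlineCacheCompiler::emitDataICEpilogue(jit);
//     jit.ret();
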
+ LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::InlineCache); + return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "get_by_id_with_this_slow"_s, "DataIC get_by_id_with_this_slow"); +} + +static MacroAssemblerCodeRef getByValSlowPathCodeGenerator(VM& vm) +{ + CCallHelpers jit; + + using SlowOperation = decltype(operationGetByValOptimize); + + using BaselineJITRegisters::GetByVal::baseJSR; + using BaselineJITRegisters::GetByVal::propertyJSR; + using BaselineJITRegisters::GetByVal::globalObjectGPR; + using BaselineJITRegisters::GetByVal::stubInfoGPR; + using BaselineJITRegisters::GetByVal::profileGPR; + + InlineCacheCompiler::emitDataICPrologue(jit); + + // Call slow operation + jit.prepareCallOperation(vm); + jit.loadPtr(CCallHelpers::Address(GPRInfo::jitDataRegister, BaselineJITData::offsetOfGlobalObject()), globalObjectGPR); + jit.setupArguments(baseJSR, propertyJSR, globalObjectGPR, stubInfoGPR, profileGPR); + static_assert(preferredArgumentGPR() == stubInfoGPR, "Needed for branch to slow operation via StubInfo"); + jit.call(CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), OperationPtrTag); + + jit.emitNonPatchableExceptionCheck(vm).linkThunk(CodeLocationLabel(vm.getCTIStub(CommonJITThunkID::HandleException).retaggedCode()), &jit); + + InlineCacheCompiler::emitDataICEpilogue(jit); + jit.ret(); + + // While sp is extended, it is OK. Jump target will adjust it. + LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::InlineCache); + return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "get_by_val_slow"_s, "DataIC get_by_val_slow"); +} + +static MacroAssemblerCodeRef getPrivateNameSlowPathCodeGenerator(VM& vm) +{ + CCallHelpers jit; + + using SlowOperation = decltype(operationGetPrivateNameOptimize); + + using BaselineJITRegisters::PrivateBrand::baseJSR; + using BaselineJITRegisters::PrivateBrand::propertyJSR; + using BaselineJITRegisters::PrivateBrand::globalObjectGPR; + using BaselineJITRegisters::PrivateBrand::stubInfoGPR; + + InlineCacheCompiler::emitDataICPrologue(jit); + + // Call slow operation + jit.prepareCallOperation(vm); + jit.loadPtr(CCallHelpers::Address(GPRInfo::jitDataRegister, BaselineJITData::offsetOfGlobalObject()), globalObjectGPR); + jit.setupArguments(baseJSR, propertyJSR, globalObjectGPR, stubInfoGPR); + static_assert(preferredArgumentGPR() == stubInfoGPR, "Needed for branch to slow operation via StubInfo"); + jit.call(CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), OperationPtrTag); + + jit.emitNonPatchableExceptionCheck(vm).linkThunk(CodeLocationLabel(vm.getCTIStub(CommonJITThunkID::HandleException).retaggedCode()), &jit); + + InlineCacheCompiler::emitDataICEpilogue(jit); + jit.ret(); + + // While sp is extended, it is OK. Jump target will adjust it. 
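
// [Editor's note] Each of these thunks refetches the JSGlobalObject* from BaselineJITData instead of
// receiving it as an argument. Since emitDataICPrologue leaves the caller's frame (and therefore
// jitDataRegister) untouched, the caller's per-CodeBlock data is still directly addressable. A
// self-contained miniature of that offset-based fetch, with hypothetical names (not JSC's):

struct JITDataSketch {
    void* globalObject; // stands in for the slot behind BaselineJITData::offsetOfGlobalObject()
};

static inline void* loadGlobalObjectSketch(const JITDataSketch* jitData)
{
    return jitData->globalObject; // corresponds to the single loadPtr in the thunks above
}
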
+ LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::InlineCache); + return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "get_private_name_slow"_s, "DataIC get_private_name_slow"); +} + +#if USE(JSVALUE64) +static MacroAssemblerCodeRef getByValWithThisSlowPathCodeGenerator(VM& vm) +{ + CCallHelpers jit; + + using SlowOperation = decltype(operationGetByValWithThisOptimize); + + using BaselineJITRegisters::GetByValWithThis::baseJSR; + using BaselineJITRegisters::GetByValWithThis::propertyJSR; + using BaselineJITRegisters::GetByValWithThis::thisJSR; + using BaselineJITRegisters::GetByValWithThis::globalObjectGPR; + using BaselineJITRegisters::GetByValWithThis::stubInfoGPR; + using BaselineJITRegisters::GetByValWithThis::profileGPR; + + InlineCacheCompiler::emitDataICPrologue(jit); + + // Call slow operation + jit.prepareCallOperation(vm); + jit.loadPtr(CCallHelpers::Address(GPRInfo::jitDataRegister, BaselineJITData::offsetOfGlobalObject()), globalObjectGPR); + jit.setupArguments(baseJSR, propertyJSR, thisJSR, globalObjectGPR, stubInfoGPR, profileGPR); + static_assert(preferredArgumentGPR() == stubInfoGPR, "Needed for branch to slow operation via StubInfo"); + jit.call(CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), OperationPtrTag); + + jit.emitNonPatchableExceptionCheck(vm).linkThunk(CodeLocationLabel(vm.getCTIStub(CommonJITThunkID::HandleException).retaggedCode()), &jit); + + InlineCacheCompiler::emitDataICEpilogue(jit); + jit.ret(); + + // While sp is extended, it is OK. Jump target will adjust it. + LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::InlineCache); + return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "get_by_val_with_this_slow"_s, "DataIC get_by_val_with_this_slow"); +} +#endif + +static MacroAssemblerCodeRef putByIdSlowPathCodeGenerator(VM& vm) +{ + CCallHelpers jit; + + using SlowOperation = decltype(operationPutByIdStrictOptimize); + + using BaselineJITRegisters::PutById::baseJSR; + using BaselineJITRegisters::PutById::valueJSR; + using BaselineJITRegisters::PutById::globalObjectGPR; + using BaselineJITRegisters::PutById::stubInfoGPR; + + InlineCacheCompiler::emitDataICPrologue(jit); + + // Call slow operation + jit.prepareCallOperation(vm); + jit.loadPtr(CCallHelpers::Address(GPRInfo::jitDataRegister, BaselineJITData::offsetOfGlobalObject()), globalObjectGPR); + jit.setupArguments(valueJSR, baseJSR, globalObjectGPR, stubInfoGPR); + static_assert(preferredArgumentGPR() == stubInfoGPR, "Needed for branch to slow operation via StubInfo"); + jit.call(CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), OperationPtrTag); + + jit.emitNonPatchableExceptionCheck(vm).linkThunk(CodeLocationLabel(vm.getCTIStub(CommonJITThunkID::HandleException).retaggedCode()), &jit); + + InlineCacheCompiler::emitDataICEpilogue(jit); + jit.ret(); + + // While sp is extended, it is OK. Jump target will adjust it. 
+    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::InlineCache);
+    return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "put_by_id_slow"_s, "DataIC put_by_id_slow");
+}
+
+static MacroAssemblerCodeRef putByValSlowPathCodeGenerator(VM& vm)
+{
+    CCallHelpers jit;
+
+    using SlowOperation = decltype(operationPutByValStrictOptimize);
+
+    using BaselineJITRegisters::PutByVal::baseJSR;
+    using BaselineJITRegisters::PutByVal::propertyJSR;
+    using BaselineJITRegisters::PutByVal::valueJSR;
+    using BaselineJITRegisters::PutByVal::profileGPR;
+    using BaselineJITRegisters::PutByVal::stubInfoGPR;
+    using BaselineJITRegisters::PutByVal::globalObjectGPR;
+
+    InlineCacheCompiler::emitDataICPrologue(jit);
+
+    // Call slow operation
+    jit.prepareCallOperation(vm);
+    jit.loadPtr(CCallHelpers::Address(GPRInfo::jitDataRegister, BaselineJITData::offsetOfGlobalObject()), globalObjectGPR);
+    jit.setupArguments(baseJSR, propertyJSR, valueJSR, globalObjectGPR, stubInfoGPR, profileGPR);
+    jit.call(CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), OperationPtrTag);
+#if CPU(ARM_THUMB2)
+    // ARMv7 clobbers the metadataTable register. Thus we need to restore it here.
+    JIT::emitMaterializeMetadataAndConstantPoolRegisters(jit);
+#endif
+
+    jit.emitNonPatchableExceptionCheck(vm).linkThunk(CodeLocationLabel(vm.getCTIStub(CommonJITThunkID::HandleException).retaggedCode()), &jit);
+
+    InlineCacheCompiler::emitDataICEpilogue(jit);
+    jit.ret();
+
+    // While sp is extended, it is OK. Jump target will adjust it.
+    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::InlineCache);
+    return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "put_by_val_slow"_s, "DataIC put_by_val_slow");
+}
+
+static MacroAssemblerCodeRef instanceOfSlowPathCodeGenerator(VM& vm)
+{
+    CCallHelpers jit;
+
+    using SlowOperation = decltype(operationInstanceOfOptimize);
+
+    using BaselineJITRegisters::Instanceof::valueJSR;
+    using BaselineJITRegisters::Instanceof::protoJSR;
+    using BaselineJITRegisters::Instanceof::globalObjectGPR;
+    using BaselineJITRegisters::Instanceof::stubInfoGPR;
+
+    InlineCacheCompiler::emitDataICPrologue(jit);
+
+    // Call slow operation
+    jit.prepareCallOperation(vm);
+    jit.loadPtr(CCallHelpers::Address(GPRInfo::jitDataRegister, BaselineJITData::offsetOfGlobalObject()), globalObjectGPR);
+    jit.setupArguments(valueJSR, protoJSR, globalObjectGPR, stubInfoGPR);
+    static_assert(preferredArgumentGPR() == stubInfoGPR, "Needed for branch to slow operation via StubInfo");
+    jit.call(CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), OperationPtrTag);
+
+    jit.emitNonPatchableExceptionCheck(vm).linkThunk(CodeLocationLabel(vm.getCTIStub(CommonJITThunkID::HandleException).retaggedCode()), &jit);
+
+    InlineCacheCompiler::emitDataICEpilogue(jit);
+    jit.ret();
+
+    // While sp is extended, it is OK. Jump target will adjust it.
+ LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::InlineCache); + return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "instanceof_slow"_s, "DataIC instanceof_slow"); +} + +static MacroAssemblerCodeRef delByIdSlowPathCodeGenerator(VM& vm) +{ + CCallHelpers jit; + + using SlowOperation = decltype(operationDeleteByIdStrictOptimize); + + using BaselineJITRegisters::DelById::baseJSR; + using BaselineJITRegisters::DelById::globalObjectGPR; + using BaselineJITRegisters::DelById::stubInfoGPR; + + InlineCacheCompiler::emitDataICPrologue(jit); + + // Call slow operation + jit.prepareCallOperation(vm); + jit.loadPtr(CCallHelpers::Address(GPRInfo::jitDataRegister, BaselineJITData::offsetOfGlobalObject()), globalObjectGPR); + jit.setupArguments(baseJSR, globalObjectGPR, stubInfoGPR); + static_assert(preferredArgumentGPR() == stubInfoGPR, "Needed for branch to slow operation via StubInfo"); + jit.call(CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), OperationPtrTag); + + jit.emitNonPatchableExceptionCheck(vm).linkThunk(CodeLocationLabel(vm.getCTIStub(CommonJITThunkID::HandleException).retaggedCode()), &jit); + + InlineCacheCompiler::emitDataICEpilogue(jit); + jit.ret(); + + // While sp is extended, it is OK. Jump target will adjust it. + LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::InlineCache); + return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "del_by_id_slow"_s, "DataIC del_by_id_slow"); +} + +static MacroAssemblerCodeRef delByValSlowPathCodeGenerator(VM& vm) +{ + CCallHelpers jit; + + using SlowOperation = decltype(operationDeleteByValStrictOptimize); + + using BaselineJITRegisters::DelByVal::baseJSR; + using BaselineJITRegisters::DelByVal::propertyJSR; + using BaselineJITRegisters::DelByVal::globalObjectGPR; + using BaselineJITRegisters::DelByVal::stubInfoGPR; + + InlineCacheCompiler::emitDataICPrologue(jit); + + // Call slow operation + jit.prepareCallOperation(vm); + jit.loadPtr(CCallHelpers::Address(GPRInfo::jitDataRegister, BaselineJITData::offsetOfGlobalObject()), globalObjectGPR); + jit.setupArguments(baseJSR, propertyJSR, globalObjectGPR, stubInfoGPR); + static_assert(preferredArgumentGPR() == stubInfoGPR, "Needed for branch to slow operation via StubInfo"); + jit.call(CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), OperationPtrTag); + + jit.emitNonPatchableExceptionCheck(vm).linkThunk(CodeLocationLabel(vm.getCTIStub(CommonJITThunkID::HandleException).retaggedCode()), &jit); + + InlineCacheCompiler::emitDataICEpilogue(jit); + jit.ret(); + + // While sp is extended, it is OK. Jump target will adjust it. 
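
// [Editor's note] generateSlowPathCode, which follows, shares one thunk among several AccessTypes
// and guards that sharing with static_asserts that the slow operations agree on argument types.
// The same guard reduced to standard, compilable C++; the operation names are hypothetical
// stand-ins, not JSC's:

#include <type_traits>

extern "C" long opGetByIdSketch(void* callFrame, long baseValue, void* stubInfo);
extern "C" long opTryGetByIdSketch(void* callFrame, long baseValue, void* stubInfo);

// Two operations may be dispatched through one shared thunk only if a call compiled against one
// signature is also a valid call against the other.
static_assert(std::is_same_v<decltype(opGetByIdSketch), decltype(opTryGetByIdSketch)>,
    "operations sharing a slow-path thunk must have identical signatures");
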
+ LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::InlineCache); + return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "del_by_val_slow"_s, "DataIC del_by_val_slow"); +} + +MacroAssemblerCodeRef InlineCacheCompiler::generateSlowPathCode(VM& vm, AccessType type) +{ + switch (type) { + case AccessType::GetById: + case AccessType::TryGetById: + case AccessType::GetByIdDirect: + case AccessType::InById: + case AccessType::GetPrivateNameById: { + using ArgumentTypes = FunctionTraits::ArgumentTypes; + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + return vm.getCTIStub(getByIdSlowPathCodeGenerator); + } + + case AccessType::GetByIdWithThis: + return vm.getCTIStub(getByIdWithThisSlowPathCodeGenerator); + + case AccessType::GetByVal: + case AccessType::InByVal: { + using ArgumentTypes = FunctionTraits::ArgumentTypes; + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + return vm.getCTIStub(getByValSlowPathCodeGenerator); + } + + case AccessType::GetByValWithThis: { +#if USE(JSVALUE64) + return vm.getCTIStub(getByValWithThisSlowPathCodeGenerator); +#else + RELEASE_ASSERT_NOT_REACHED(); + return { }; +#endif + } + + case AccessType::GetPrivateName: + case AccessType::HasPrivateBrand: + case AccessType::HasPrivateName: + case AccessType::CheckPrivateBrand: + case AccessType::SetPrivateBrand: { + using ArgumentTypes = FunctionTraits::ArgumentTypes; + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + return vm.getCTIStub(getPrivateNameSlowPathCodeGenerator); + } + + case AccessType::PutByIdStrict: + case AccessType::PutByIdSloppy: + case AccessType::PutByIdDirectStrict: + case AccessType::PutByIdDirectSloppy: + case AccessType::DefinePrivateNameById: + case AccessType::SetPrivateNameById: { + using ArgumentTypes = FunctionTraits::ArgumentTypes; + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + return vm.getCTIStub(putByIdSlowPathCodeGenerator); + } + + case AccessType::PutByValStrict: + case AccessType::PutByValSloppy: + case AccessType::PutByValDirectStrict: + case AccessType::PutByValDirectSloppy: + case AccessType::DefinePrivateNameByVal: + case AccessType::SetPrivateNameByVal: { + using ArgumentTypes = FunctionTraits::ArgumentTypes; + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + return vm.getCTIStub(putByValSlowPathCodeGenerator); + } + + case AccessType::InstanceOf: + return vm.getCTIStub(instanceOfSlowPathCodeGenerator); + + case AccessType::DeleteByIdStrict: + case AccessType::DeleteByIdSloppy: { + using ArgumentTypes = FunctionTraits::ArgumentTypes; + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + return 
vm.getCTIStub(delByIdSlowPathCodeGenerator); + } + + case AccessType::DeleteByValStrict: + case AccessType::DeleteByValSloppy: { + using ArgumentTypes = FunctionTraits::ArgumentTypes; + static_assert(std::is_same_v::ArgumentTypes, ArgumentTypes>); + return vm.getCTIStub(delByValSlowPathCodeGenerator); + } + } + + RELEASE_ASSERT_NOT_REACHED(); + return { }; +} + +InlineCacheHandler::InlineCacheHandler(Ref&& stubRoutine, std::unique_ptr&& watchpoints) + : m_callTarget(stubRoutine->code().code().template retagged()) + , m_jumpTarget(CodePtr { m_callTarget.retagged().dataLocation() + prologueSizeInBytesDataIC }.template retagged()) + , m_stubRoutine(WTFMove(stubRoutine)) + , m_watchpoints(WTFMove(watchpoints)) +{ +} + +Ref InlineCacheHandler::createNonHandlerSlowPath(CodePtr slowPath) +{ + auto result = adoptRef(*new InlineCacheHandler); + result->m_callTarget = slowPath; + result->m_jumpTarget = slowPath; + return result; +} + +Ref InlineCacheHandler::createSlowPath(VM& vm, AccessType accessType) +{ + auto result = adoptRef(*new InlineCacheHandler); + auto codeRef = InlineCacheCompiler::generateSlowPathCode(vm, accessType); + result->m_callTarget = codeRef.code().template retagged(); + result->m_jumpTarget = CodePtr { codeRef.retaggedCode().dataLocation() + prologueSizeInBytesDataIC }.template retagged(); + return result; +} + +Ref InlineCacheCompiler::generateSlowPathHandler(VM& vm, AccessType accessType) +{ + ASSERT(!isCompilationThread()); + ASSERT(Options::useHandlerIC()); + if (auto handler = vm.m_sharedJITStubs->getSlowPathHandler(accessType)) + return handler.releaseNonNull(); + auto handler = InlineCacheHandler::createSlowPath(vm, accessType); + vm.m_sharedJITStubs->setSlowPathHandler(accessType, handler); + return handler; +} + void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers::JumpList& fallThrough) { SuperSamplerScope superSamplerScope(false); @@ -540,7 +1322,8 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers return; } - case AccessCase::IndexedScopedArgumentsLoad: { + case AccessCase::IndexedScopedArgumentsLoad: + case AccessCase::IndexedScopedArgumentsInHit: { ASSERT(!accessCase.viaGlobalProxy()); // This code is written such that the result could alias with the base or the property. 
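The `generateSlowPathHandler` above is a get-or-create cache: one shared `InlineCacheHandler` per `AccessType`, with a call entry at the thunk head and a jump entry just past the DataIC prologue (`prologueSizeInBytesDataIC`). A simplified sketch of that memoization, using standard containers and hypothetical `AccessType`/`Handler` stand-ins:

```cpp
#include <cstdint>
#include <memory>
#include <unordered_map>

enum class AccessType : uint8_t { GetById, PutByIdStrict, DeleteByIdStrict };

struct Handler {
    // Two entry points into the same stub: callers use callTarget, while
    // tail jumps enter at jumpTarget (= callTarget + prologue size).
    const void* callTarget = nullptr;
    const void* jumpTarget = nullptr;
};

class VM {
public:
    std::shared_ptr<Handler> slowPathHandler(AccessType type)
    {
        if (auto it = m_handlers.find(type); it != m_handlers.end())
            return it->second; // shared by every IC site of this access type
        auto handler = std::make_shared<Handler>(generate(type));
        m_handlers.emplace(type, handler);
        return handler;
    }

private:
    Handler generate(AccessType)
    {
        static const char thunk[16] = {}; // stand-in for emitted thunk code
        return { thunk, thunk + 8 };      // jump entry skips a fake prologue
    }
    std::unordered_map<AccessType, std::shared_ptr<Handler>> m_handlers;
};

int main()
{
    VM vm;
    auto a = vm.slowPathHandler(AccessType::GetById);
    auto b = vm.slowPathHandler(AccessType::GetById);
    return a == b ? 0 : 1; // same handler object both times
}
```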
GPRReg propertyGPR = stubInfo.propertyGPR(); @@ -569,7 +1352,10 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers jit.zeroExtend32ToWord(propertyGPR, scratch3GPR); jit.load32(CCallHelpers::BaseIndex(scratchGPR, scratch3GPR, CCallHelpers::TimesFour), scratchGPR); failAndIgnore.append(jit.branch32(CCallHelpers::Equal, scratchGPR, CCallHelpers::TrustedImm32(ScopeOffset::invalidOffset))); - jit.loadValue(CCallHelpers::BaseIndex(scratch2GPR, scratchGPR, CCallHelpers::TimesEight, JSLexicalEnvironment::offsetOfVariables()), valueRegs); + if (forInBy(accessCase.m_type)) + jit.moveTrustedValue(jsBoolean(true), valueRegs); + else + jit.loadValue(CCallHelpers::BaseIndex(scratch2GPR, scratchGPR, CCallHelpers::TimesEight, JSLexicalEnvironment::offsetOfVariables()), valueRegs); auto done = jit.jump(); overflowCase.link(&jit); @@ -579,12 +1365,19 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers #if USE(JSVALUE64) jit.loadValue(CCallHelpers::BaseIndex(scratch3GPR, scratch2GPR, CCallHelpers::TimesEight), JSValueRegs(scratchGPR)); failAndIgnore.append(jit.branchIfEmpty(scratchGPR)); - jit.move(scratchGPR, valueRegs.payloadGPR()); + if (forInBy(accessCase.m_type)) + jit.moveTrustedValue(jsBoolean(true), valueRegs); + else + jit.move(scratchGPR, valueRegs.payloadGPR()); #else jit.loadValue(CCallHelpers::BaseIndex(scratch3GPR, scratch2GPR, CCallHelpers::TimesEight), JSValueRegs(scratch2GPR, scratchGPR)); failAndIgnore.append(jit.branchIfEmpty(scratch2GPR)); - jit.move(scratchGPR, valueRegs.payloadGPR()); - jit.move(scratch2GPR, valueRegs.tagGPR()); + if (forInBy(accessCase.m_type)) + jit.moveTrustedValue(jsBoolean(true), valueRegs); + else { + jit.move(scratchGPR, valueRegs.payloadGPR()); + jit.move(scratch2GPR, valueRegs.tagGPR()); + } #endif done.link(&jit); @@ -602,7 +1395,8 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers return; } - case AccessCase::IndexedDirectArgumentsLoad: { + case AccessCase::IndexedDirectArgumentsLoad: + case AccessCase::IndexedDirectArgumentsInHit: { ASSERT(!accessCase.viaGlobalProxy()); // This code is written such that the result could alias with the base or the property. 
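Throughout this hunk, the `in`-flavoured cases reuse the load path's guards verbatim; only the final materialization differs, `jsBoolean(true)` instead of the loaded element. A stand-alone sketch of that shape, with a hypothetical hole-aware indexed lookup (names are illustrative, not JSC's):

```cpp
#include <cassert>
#include <optional>
#include <vector>

using Value = int;
enum class Mode { Get, InHit }; // InHit only answers "is it there?"

// One body, two result policies: every bounds and hole check is shared, and
// only the success value depends on the mode, like the forInBy branches.
static std::optional<Value> indexedAccess(const std::vector<std::optional<Value>>& storage,
    size_t index, Mode mode)
{
    if (index >= storage.size())
        return std::nullopt;           // fail-and-ignore: out of bounds
    if (!storage[index])
        return std::nullopt;           // fail-and-ignore: hole
    if (mode == Mode::InHit)
        return Value(1);               // jsBoolean(true) stand-in
    return storage[index];             // the loaded element
}

int main()
{
    std::vector<std::optional<Value>> vars { 10, std::nullopt, 30 };
    assert(indexedAccess(vars, 0, Mode::Get) == 10);
    assert(indexedAccess(vars, 0, Mode::InHit) == 1);
    assert(!indexedAccess(vars, 1, Mode::InHit)); // hole falls to slow path
}
```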
GPRReg propertyGPR = stubInfo.propertyGPR(); @@ -612,8 +1406,12 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers jit.load32(CCallHelpers::Address(baseGPR, DirectArguments::offsetOfLength()), scratchGPR); m_failAndRepatch.append(jit.branch32(CCallHelpers::AboveOrEqual, propertyGPR, scratchGPR)); m_failAndRepatch.append(jit.branchTestPtr(CCallHelpers::NonZero, CCallHelpers::Address(baseGPR, DirectArguments::offsetOfMappedArguments()))); - jit.zeroExtend32ToWord(propertyGPR, scratchGPR); - jit.loadValue(CCallHelpers::BaseIndex(baseGPR, scratchGPR, CCallHelpers::TimesEight, DirectArguments::storageOffset()), valueRegs); + if (forInBy(accessCase.m_type)) + jit.moveTrustedValue(jsBoolean(true), valueRegs); + else { + jit.zeroExtend32ToWord(propertyGPR, scratchGPR); + jit.loadValue(CCallHelpers::BaseIndex(baseGPR, scratchGPR, CCallHelpers::TimesEight, DirectArguments::storageOffset()), valueRegs); + } succeed(); return; } @@ -635,7 +1433,25 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers case AccessCase::IndexedResizableTypedArrayInt32Load: case AccessCase::IndexedResizableTypedArrayUint32Load: case AccessCase::IndexedResizableTypedArrayFloat32Load: - case AccessCase::IndexedResizableTypedArrayFloat64Load: { + case AccessCase::IndexedResizableTypedArrayFloat64Load: + case AccessCase::IndexedTypedArrayInt8InHit: + case AccessCase::IndexedTypedArrayUint8InHit: + case AccessCase::IndexedTypedArrayUint8ClampedInHit: + case AccessCase::IndexedTypedArrayInt16InHit: + case AccessCase::IndexedTypedArrayUint16InHit: + case AccessCase::IndexedTypedArrayInt32InHit: + case AccessCase::IndexedTypedArrayUint32InHit: + case AccessCase::IndexedTypedArrayFloat32InHit: + case AccessCase::IndexedTypedArrayFloat64InHit: + case AccessCase::IndexedResizableTypedArrayInt8InHit: + case AccessCase::IndexedResizableTypedArrayUint8InHit: + case AccessCase::IndexedResizableTypedArrayUint8ClampedInHit: + case AccessCase::IndexedResizableTypedArrayInt16InHit: + case AccessCase::IndexedResizableTypedArrayUint16InHit: + case AccessCase::IndexedResizableTypedArrayInt32InHit: + case AccessCase::IndexedResizableTypedArrayUint32InHit: + case AccessCase::IndexedResizableTypedArrayFloat32InHit: + case AccessCase::IndexedResizableTypedArrayFloat64InHit: { ASSERT(!accessCase.viaGlobalProxy()); // This code is written such that the result could alias with the base or the property. 
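The label fan-in above is the point of the change: eighteen new `InHit` variants (signed and unsigned, 8 to 64 bit, resizable or not) all funnel into one body parameterized by element type. A reduced sketch of that single-switch structure, with a hypothetical `TypedArrayType` enum standing in for JSC's:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

enum class TypedArrayType { Int8, Uint8, Int16, Uint16, Int32, Uint32 };

static size_t elementSize(TypedArrayType t)
{
    switch (t) {
    case TypedArrayType::Int8:
    case TypedArrayType::Uint8: return 1;
    case TypedArrayType::Int16:
    case TypedArrayType::Uint16: return 2;
    default: return 4;
    }
}

static bool isSigned(TypedArrayType t)
{
    return t == TypedArrayType::Int8 || t == TypedArrayType::Int16
        || t == TypedArrayType::Int32;
}

// One load routine services every (type x signedness) case label, mirroring
// how the IC emits a single body after the big case fan-in.
static int64_t loadElement(const uint8_t* vector, size_t index, TypedArrayType type)
{
    const uint8_t* p = vector + index * elementSize(type);
    switch (elementSize(type)) {
    case 1:
        return isSigned(type) ? int64_t(int8_t(*p)) : int64_t(*p);
    case 2: {
        uint16_t v;
        std::memcpy(&v, p, 2);
        return isSigned(type) ? int64_t(int16_t(v)) : int64_t(v);
    }
    default: {
        uint32_t v;
        std::memcpy(&v, p, 4);
        return isSigned(type) ? int64_t(int32_t(v)) : int64_t(v);
    }
    }
}

int main()
{
    uint8_t bytes[4] = { 0xFF, 0xFF, 0xFF, 0xFF };
    return loadElement(bytes, 0, TypedArrayType::Int8) == -1 ? 0 : 1;
}
```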
@@ -676,68 +1492,72 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers #endif } + if (forInBy(accessCase.m_type)) + jit.moveTrustedValue(jsBoolean(true), valueRegs); + else { #if USE(LARGE_TYPED_ARRAYS) - jit.load64(CCallHelpers::Address(baseGPR, JSArrayBufferView::offsetOfLength()), scratchGPR); + jit.load64(CCallHelpers::Address(baseGPR, JSArrayBufferView::offsetOfLength()), scratchGPR); #else - jit.load32(CCallHelpers::Address(baseGPR, JSArrayBufferView::offsetOfLength()), scratchGPR); + jit.load32(CCallHelpers::Address(baseGPR, JSArrayBufferView::offsetOfLength()), scratchGPR); #endif - jit.loadPtr(CCallHelpers::Address(baseGPR, JSArrayBufferView::offsetOfVector()), scratch2GPR); - jit.cageConditionallyAndUntag(Gigacage::Primitive, scratch2GPR, scratchGPR, scratchGPR, false); - jit.signExtend32ToPtr(propertyGPR, scratchGPR); - if (isInt(type)) { - switch (elementSize(type)) { - case 1: - if (JSC::isSigned(type)) - jit.load8SignedExtendTo32(CCallHelpers::BaseIndex(scratch2GPR, scratchGPR, CCallHelpers::TimesOne), valueRegs.payloadGPR()); - else - jit.load8(CCallHelpers::BaseIndex(scratch2GPR, scratchGPR, CCallHelpers::TimesOne), valueRegs.payloadGPR()); - break; - case 2: - if (JSC::isSigned(type)) - jit.load16SignedExtendTo32(CCallHelpers::BaseIndex(scratch2GPR, scratchGPR, CCallHelpers::TimesTwo), valueRegs.payloadGPR()); - else - jit.load16(CCallHelpers::BaseIndex(scratch2GPR, scratchGPR, CCallHelpers::TimesTwo), valueRegs.payloadGPR()); - break; - case 4: - jit.load32(CCallHelpers::BaseIndex(scratch2GPR, scratchGPR, CCallHelpers::TimesFour), valueRegs.payloadGPR()); - break; - default: - CRASH(); - } + jit.loadPtr(CCallHelpers::Address(baseGPR, JSArrayBufferView::offsetOfVector()), scratch2GPR); + jit.cageConditionally(Gigacage::Primitive, scratch2GPR, scratchGPR, scratchGPR); + jit.signExtend32ToPtr(propertyGPR, scratchGPR); + if (isInt(type)) { + switch (elementSize(type)) { + case 1: + if (JSC::isSigned(type)) + jit.load8SignedExtendTo32(CCallHelpers::BaseIndex(scratch2GPR, scratchGPR, CCallHelpers::TimesOne), valueRegs.payloadGPR()); + else + jit.load8(CCallHelpers::BaseIndex(scratch2GPR, scratchGPR, CCallHelpers::TimesOne), valueRegs.payloadGPR()); + break; + case 2: + if (JSC::isSigned(type)) + jit.load16SignedExtendTo32(CCallHelpers::BaseIndex(scratch2GPR, scratchGPR, CCallHelpers::TimesTwo), valueRegs.payloadGPR()); + else + jit.load16(CCallHelpers::BaseIndex(scratch2GPR, scratchGPR, CCallHelpers::TimesTwo), valueRegs.payloadGPR()); + break; + case 4: + jit.load32(CCallHelpers::BaseIndex(scratch2GPR, scratchGPR, CCallHelpers::TimesFour), valueRegs.payloadGPR()); + break; + default: + CRASH(); + } + + CCallHelpers::Jump done; + if (type == TypeUint32) { + RELEASE_ASSERT(m_scratchFPR != InvalidFPRReg); + auto canBeInt = jit.branch32(CCallHelpers::GreaterThanOrEqual, valueRegs.payloadGPR(), CCallHelpers::TrustedImm32(0)); + + jit.convertInt32ToDouble(valueRegs.payloadGPR(), m_scratchFPR); + jit.addDouble(CCallHelpers::AbsoluteAddress(&CCallHelpers::twoToThe32), m_scratchFPR); + jit.boxDouble(m_scratchFPR, valueRegs); + done = jit.jump(); + canBeInt.link(&jit); + } - CCallHelpers::Jump done; - if (type == TypeUint32) { + jit.boxInt32(valueRegs.payloadGPR(), valueRegs); + if (done.isSet()) + done.link(&jit); + } else { + ASSERT(isFloat(type)); RELEASE_ASSERT(m_scratchFPR != InvalidFPRReg); - auto canBeInt = jit.branch32(CCallHelpers::GreaterThanOrEqual, valueRegs.payloadGPR(), CCallHelpers::TrustedImm32(0)); + switch (elementSize(type)) { + case 
4: + jit.loadFloat(CCallHelpers::BaseIndex(scratch2GPR, scratchGPR, CCallHelpers::TimesFour), m_scratchFPR); + jit.convertFloatToDouble(m_scratchFPR, m_scratchFPR); + break; + case 8: { + jit.loadDouble(CCallHelpers::BaseIndex(scratch2GPR, scratchGPR, CCallHelpers::TimesEight), m_scratchFPR); + break; + } + default: + CRASH(); + } - jit.convertInt32ToDouble(valueRegs.payloadGPR(), m_scratchFPR); - jit.addDouble(CCallHelpers::AbsoluteAddress(&CCallHelpers::twoToThe32), m_scratchFPR); + jit.purifyNaN(m_scratchFPR); jit.boxDouble(m_scratchFPR, valueRegs); - done = jit.jump(); - canBeInt.link(&jit); - } - - jit.boxInt32(valueRegs.payloadGPR(), valueRegs); - if (done.isSet()) - done.link(&jit); - } else { - ASSERT(isFloat(type)); - RELEASE_ASSERT(m_scratchFPR != InvalidFPRReg); - switch (elementSize(type)) { - case 4: - jit.loadFloat(CCallHelpers::BaseIndex(scratch2GPR, scratchGPR, CCallHelpers::TimesFour), m_scratchFPR); - jit.convertFloatToDouble(m_scratchFPR, m_scratchFPR); - break; - case 8: { - jit.loadDouble(CCallHelpers::BaseIndex(scratch2GPR, scratchGPR, CCallHelpers::TimesEight), m_scratchFPR); - break; } - default: - CRASH(); - } - - jit.purifyNaN(m_scratchFPR); - jit.boxDouble(m_scratchFPR, valueRegs); } allocator.restoreReusedRegistersByPopping(jit, preservedState); @@ -751,7 +1571,8 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers return; } - case AccessCase::IndexedStringLoad: { + case AccessCase::IndexedStringLoad: + case AccessCase::IndexedStringInHit: { ASSERT(!accessCase.viaGlobalProxy()); // This code is written such that the result could alias with the base or the property. GPRReg propertyGPR = stubInfo.propertyGPR(); @@ -772,21 +1593,26 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers failAndIgnore.append(jit.branch32(CCallHelpers::AboveOrEqual, propertyGPR, scratchGPR)); - jit.load32(CCallHelpers::Address(scratch2GPR, StringImpl::flagsOffset()), scratchGPR); - jit.loadPtr(CCallHelpers::Address(scratch2GPR, StringImpl::dataOffset()), scratch2GPR); - auto is16Bit = jit.branchTest32(CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(StringImpl::flagIs8Bit())); - jit.zeroExtend32ToWord(propertyGPR, scratchGPR); - jit.load8(CCallHelpers::BaseIndex(scratch2GPR, scratchGPR, CCallHelpers::TimesOne, 0), scratch2GPR); - auto is8BitLoadDone = jit.jump(); - is16Bit.link(&jit); - jit.zeroExtend32ToWord(propertyGPR, scratchGPR); - jit.load16(CCallHelpers::BaseIndex(scratch2GPR, scratchGPR, CCallHelpers::TimesTwo, 0), scratch2GPR); - is8BitLoadDone.link(&jit); - - failAndIgnore.append(jit.branch32(CCallHelpers::Above, scratch2GPR, CCallHelpers::TrustedImm32(maxSingleCharacterString))); - jit.move(CCallHelpers::TrustedImmPtr(vm.smallStrings.singleCharacterStrings()), scratchGPR); - jit.loadPtr(CCallHelpers::BaseIndex(scratchGPR, scratch2GPR, CCallHelpers::ScalePtr, 0), valueRegs.payloadGPR()); - jit.boxCell(valueRegs.payloadGPR(), valueRegs); + if (forInBy(accessCase.m_type)) + jit.moveTrustedValue(jsBoolean(true), valueRegs); + else { + jit.load32(CCallHelpers::Address(scratch2GPR, StringImpl::flagsOffset()), scratchGPR); + jit.loadPtr(CCallHelpers::Address(scratch2GPR, StringImpl::dataOffset()), scratch2GPR); + auto is16Bit = jit.branchTest32(CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(StringImpl::flagIs8Bit())); + jit.zeroExtend32ToWord(propertyGPR, scratchGPR); + jit.load8(CCallHelpers::BaseIndex(scratch2GPR, scratchGPR, CCallHelpers::TimesOne, 0), scratch2GPR); + auto is8BitLoadDone = 
jit.jump(); + is16Bit.link(&jit); + jit.zeroExtend32ToWord(propertyGPR, scratchGPR); + jit.load16(CCallHelpers::BaseIndex(scratch2GPR, scratchGPR, CCallHelpers::TimesTwo, 0), scratch2GPR); + is8BitLoadDone.link(&jit); + + failAndIgnore.append(jit.branch32(CCallHelpers::Above, scratch2GPR, CCallHelpers::TrustedImm32(maxSingleCharacterString))); + jit.move(CCallHelpers::TrustedImmPtr(vm.smallStrings.singleCharacterStrings()), scratchGPR); + jit.loadPtr(CCallHelpers::BaseIndex(scratchGPR, scratch2GPR, CCallHelpers::ScalePtr, 0), valueRegs.payloadGPR()); + jit.boxCell(valueRegs.payloadGPR(), valueRegs); + } + allocator.restoreReusedRegistersByPopping(jit, preservedState); succeed(); @@ -800,7 +1626,8 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers return; } - case AccessCase::IndexedNoIndexingMiss: { + case AccessCase::IndexedNoIndexingMiss: + case AccessCase::IndexedNoIndexingInMiss: { emitDefaultGuard(); GPRReg propertyGPR = stubInfo.propertyGPR(); m_failAndIgnore.append(jit.branch32(CCallHelpers::LessThan, propertyGPR, CCallHelpers::TrustedImm32(0))); @@ -810,7 +1637,11 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers case AccessCase::IndexedInt32Load: case AccessCase::IndexedDoubleLoad: case AccessCase::IndexedContiguousLoad: - case AccessCase::IndexedArrayStorageLoad: { + case AccessCase::IndexedArrayStorageLoad: + case AccessCase::IndexedInt32InHit: + case AccessCase::IndexedDoubleInHit: + case AccessCase::IndexedContiguousInHit: + case AccessCase::IndexedArrayStorageInHit: { ASSERT(!accessCase.viaGlobalProxy()); // This code is written such that the result could alias with the base or the property. GPRReg propertyGPR = stubInfo.propertyGPR(); @@ -832,7 +1663,7 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers preservedState = allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace); }; - if (accessCase.m_type == AccessCase::IndexedArrayStorageLoad) { + if (accessCase.m_type == AccessCase::IndexedArrayStorageLoad || accessCase.m_type == AccessCase::IndexedArrayStorageInHit) { jit.add32(CCallHelpers::TrustedImm32(-ArrayStorageShape), scratchGPR, scratchGPR); fallThrough.append(jit.branch32(CCallHelpers::Above, scratchGPR, CCallHelpers::TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape))); @@ -845,24 +1676,34 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers #if USE(JSVALUE64) jit.loadValue(CCallHelpers::BaseIndex(scratchGPR, scratch2GPR, CCallHelpers::TimesEight, ArrayStorage::vectorOffset()), JSValueRegs(scratchGPR)); failAndIgnore.append(jit.branchIfEmpty(scratchGPR)); - jit.move(scratchGPR, valueRegs.payloadGPR()); + if (forInBy(accessCase.m_type)) + jit.moveTrustedValue(jsBoolean(true), valueRegs); + else + jit.move(scratchGPR, valueRegs.payloadGPR()); #else jit.loadValue(CCallHelpers::BaseIndex(scratchGPR, scratch2GPR, CCallHelpers::TimesEight, ArrayStorage::vectorOffset()), JSValueRegs(scratch3GPR, scratchGPR)); failAndIgnore.append(jit.branchIfEmpty(scratch3GPR)); - jit.move(scratchGPR, valueRegs.payloadGPR()); - jit.move(scratch3GPR, valueRegs.tagGPR()); + if (forInBy(accessCase.m_type)) + jit.moveTrustedValue(jsBoolean(true), valueRegs); + else { + jit.move(scratchGPR, valueRegs.payloadGPR()); + jit.move(scratch3GPR, valueRegs.tagGPR()); + } #endif } else { IndexingType expectedShape; switch (accessCase.m_type) { case AccessCase::IndexedInt32Load: + case AccessCase::IndexedInt32InHit: 
expectedShape = Int32Shape; break; case AccessCase::IndexedDoubleLoad: + case AccessCase::IndexedDoubleInHit: ASSERT(Options::allowDoubleShape()); expectedShape = DoubleShape; break; case AccessCase::IndexedContiguousLoad: + case AccessCase::IndexedContiguousInHit: expectedShape = ContiguousShape; break; default: @@ -877,21 +1718,31 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR); failAndIgnore.append(jit.branch32(CCallHelpers::AboveOrEqual, propertyGPR, CCallHelpers::Address(scratchGPR, Butterfly::offsetOfPublicLength()))); jit.zeroExtend32ToWord(propertyGPR, scratch2GPR); - if (accessCase.m_type == AccessCase::IndexedDoubleLoad) { + if (accessCase.m_type == AccessCase::IndexedDoubleLoad || accessCase.m_type == AccessCase::IndexedDoubleInHit) { RELEASE_ASSERT(m_scratchFPR != InvalidFPRReg); jit.loadDouble(CCallHelpers::BaseIndex(scratchGPR, scratch2GPR, CCallHelpers::TimesEight), m_scratchFPR); failAndIgnore.append(jit.branchIfNaN(m_scratchFPR)); - jit.boxDouble(m_scratchFPR, valueRegs); + if (forInBy(accessCase.m_type)) + jit.moveTrustedValue(jsBoolean(true), valueRegs); + else + jit.boxDouble(m_scratchFPR, valueRegs); } else { #if USE(JSVALUE64) jit.loadValue(CCallHelpers::BaseIndex(scratchGPR, scratch2GPR, CCallHelpers::TimesEight), JSValueRegs(scratchGPR)); failAndIgnore.append(jit.branchIfEmpty(scratchGPR)); - jit.move(scratchGPR, valueRegs.payloadGPR()); + if (forInBy(accessCase.m_type)) + jit.moveTrustedValue(jsBoolean(true), valueRegs); + else + jit.move(scratchGPR, valueRegs.payloadGPR()); #else jit.loadValue(CCallHelpers::BaseIndex(scratchGPR, scratch2GPR, CCallHelpers::TimesEight), JSValueRegs(scratch3GPR, scratchGPR)); failAndIgnore.append(jit.branchIfEmpty(scratch3GPR)); - jit.move(scratchGPR, valueRegs.payloadGPR()); - jit.move(scratch3GPR, valueRegs.tagGPR()); + if (forInBy(accessCase.m_type)) + jit.moveTrustedValue(jsBoolean(true), valueRegs); + else { + jit.move(scratchGPR, valueRegs.payloadGPR()); + jit.move(scratch3GPR, valueRegs.tagGPR()); + } #endif } } @@ -1138,7 +1989,7 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers jit.load32(CCallHelpers::Address(baseGPR, JSArrayBufferView::offsetOfLength()), scratchGPR); #endif jit.loadPtr(CCallHelpers::Address(baseGPR, JSArrayBufferView::offsetOfVector()), scratch2GPR); - jit.cageConditionallyAndUntag(Gigacage::Primitive, scratch2GPR, scratchGPR, scratchGPR, false); + jit.cageConditionally(Gigacage::Primitive, scratch2GPR, scratchGPR, scratchGPR); jit.signExtend32ToPtr(propertyGPR, scratchGPR); if (isInt(type)) { if (isClamped(type)) { @@ -1283,7 +2134,6 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers GPRReg scratch2GPR = allocator.allocateScratchGPR(); GPRReg scratch3GPR = allocator.allocateScratchGPR(); GPRReg scratch4GPR = allocator.allocateScratchGPR(); - GPRReg scratch5GPR = allocator.allocateScratchGPR(); ScratchRegisterAllocator::PreservedState preservedState = allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace); @@ -1294,11 +2144,11 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers slowCases.append(jit.branchIfNotString(propertyGPR)); } - jit.loadPtr(CCallHelpers::Address(propertyGPR, JSString::offsetOfValue()), scratch5GPR); - slowCases.append(jit.branchIfRopeStringImpl(scratch5GPR)); - slowCases.append(jit.branchTest32(CCallHelpers::Zero, 
CCallHelpers::Address(scratch5GPR, StringImpl::flagsOffset()), CCallHelpers::TrustedImm32(StringImpl::flagIsAtom()))); + jit.loadPtr(CCallHelpers::Address(propertyGPR, JSString::offsetOfValue()), scratch4GPR); + slowCases.append(jit.branchIfRopeStringImpl(scratch4GPR)); + slowCases.append(jit.branchTest32(CCallHelpers::Zero, CCallHelpers::Address(scratch4GPR, StringImpl::flagsOffset()), CCallHelpers::TrustedImm32(StringImpl::flagIsAtom()))); - slowCases.append(jit.loadMegamorphicProperty(vm, baseGPR, scratch5GPR, nullptr, valueRegs.payloadGPR(), scratchGPR, scratch2GPR, scratch3GPR, scratch4GPR)); + slowCases.append(jit.loadMegamorphicProperty(vm, baseGPR, scratch4GPR, nullptr, valueRegs.payloadGPR(), scratchGPR, scratch2GPR, scratch3GPR)); allocator.restoreReusedRegistersByPopping(jit, preservedState); succeed(); @@ -1322,11 +2172,10 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers auto allocator = makeDefaultScratchAllocator(scratchGPR); GPRReg scratch2GPR = allocator.allocateScratchGPR(); GPRReg scratch3GPR = allocator.allocateScratchGPR(); - GPRReg scratch4GPR = allocator.allocateScratchGPR(); ScratchRegisterAllocator::PreservedState preservedState = allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace); - auto slowCases = jit.loadMegamorphicProperty(vm, baseGPR, InvalidGPRReg, uid, valueRegs.payloadGPR(), scratchGPR, scratch2GPR, scratch3GPR, scratch4GPR); + auto slowCases = jit.loadMegamorphicProperty(vm, baseGPR, InvalidGPRReg, uid, valueRegs.payloadGPR(), scratchGPR, scratch2GPR, scratch3GPR); allocator.restoreReusedRegistersByPopping(jit, preservedState); succeed(); @@ -1347,6 +2196,60 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers CCallHelpers::JumpList failAndRepatch; auto* uid = accessCase.m_identifier.uid(); + auto allocator = makeDefaultScratchAllocator(scratchGPR); + GPRReg scratch2GPR = allocator.allocateScratchGPR(); + GPRReg scratch3GPR = allocator.allocateScratchGPR(); + + ScratchRegisterAllocator::PreservedState preservedState = allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace); + + auto slowCases = jit.storeMegamorphicProperty(vm, baseGPR, InvalidGPRReg, uid, valueRegs.payloadGPR(), scratchGPR, scratch2GPR, scratch3GPR); + + allocator.restoreReusedRegistersByPopping(jit, preservedState); + succeed(); + + if (allocator.didReuseRegisters()) { + slowCases.link(&jit); + allocator.restoreReusedRegistersByPopping(jit, preservedState); + m_failAndRepatch.append(jit.jump()); + } else + m_failAndRepatch.append(slowCases); +#endif + return; + } + + case AccessCase::InMegamorphic: { +#if USE(JSVALUE64) + ASSERT(!accessCase.viaGlobalProxy()); + CCallHelpers::JumpList failAndRepatch; + auto* uid = accessCase.m_identifier.uid(); + + auto allocator = makeDefaultScratchAllocator(scratchGPR); + GPRReg scratch2GPR = allocator.allocateScratchGPR(); + GPRReg scratch3GPR = allocator.allocateScratchGPR(); + + ScratchRegisterAllocator::PreservedState preservedState = allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace); + + auto slowCases = jit.hasMegamorphicProperty(vm, baseGPR, InvalidGPRReg, uid, valueRegs.payloadGPR(), scratchGPR, scratch2GPR, scratch3GPR); + + allocator.restoreReusedRegistersByPopping(jit, preservedState); + succeed(); + + if (allocator.didReuseRegisters()) { + slowCases.link(&jit); + allocator.restoreReusedRegistersByPopping(jit, 
preservedState); + m_failAndRepatch.append(jit.jump()); + } else + m_failAndRepatch.append(slowCases); +#endif + return; + } + + case AccessCase::IndexedMegamorphicIn: { +#if USE(JSVALUE64) + ASSERT(!accessCase.viaGlobalProxy()); + + CCallHelpers::JumpList slowCases; + auto allocator = makeDefaultScratchAllocator(scratchGPR); GPRReg scratch2GPR = allocator.allocateScratchGPR(); GPRReg scratch3GPR = allocator.allocateScratchGPR(); @@ -1354,7 +2257,18 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers ScratchRegisterAllocator::PreservedState preservedState = allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace); - auto slowCases = jit.storeMegamorphicProperty(vm, baseGPR, InvalidGPRReg, uid, valueRegs.payloadGPR(), scratchGPR, scratch2GPR, scratch3GPR, scratch4GPR); + CCallHelpers::JumpList notString; + GPRReg propertyGPR = m_stubInfo->propertyGPR(); + if (!m_stubInfo->propertyIsString) { + slowCases.append(jit.branchIfNotCell(propertyGPR)); + slowCases.append(jit.branchIfNotString(propertyGPR)); + } + + jit.loadPtr(CCallHelpers::Address(propertyGPR, JSString::offsetOfValue()), scratch4GPR); + slowCases.append(jit.branchIfRopeStringImpl(scratch4GPR)); + slowCases.append(jit.branchTest32(CCallHelpers::Zero, CCallHelpers::Address(scratch4GPR, StringImpl::flagsOffset()), CCallHelpers::TrustedImm32(StringImpl::flagIsAtom()))); + + slowCases.append(jit.hasMegamorphicProperty(vm, baseGPR, scratch4GPR, nullptr, valueRegs.payloadGPR(), scratchGPR, scratch2GPR, scratch3GPR)); allocator.restoreReusedRegistersByPopping(jit, preservedState); succeed(); @@ -1379,7 +2293,6 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers GPRReg scratch2GPR = allocator.allocateScratchGPR(); GPRReg scratch3GPR = allocator.allocateScratchGPR(); GPRReg scratch4GPR = allocator.allocateScratchGPR(); - GPRReg scratch5GPR = allocator.allocateScratchGPR(); ScratchRegisterAllocator::PreservedState preservedState = allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace); @@ -1390,11 +2303,11 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers slowCases.append(jit.branchIfNotString(propertyGPR)); } - jit.loadPtr(CCallHelpers::Address(propertyGPR, JSString::offsetOfValue()), scratch5GPR); - slowCases.append(jit.branchIfRopeStringImpl(scratch5GPR)); - slowCases.append(jit.branchTest32(CCallHelpers::Zero, CCallHelpers::Address(scratch5GPR, StringImpl::flagsOffset()), CCallHelpers::TrustedImm32(StringImpl::flagIsAtom()))); + jit.loadPtr(CCallHelpers::Address(propertyGPR, JSString::offsetOfValue()), scratch4GPR); + slowCases.append(jit.branchIfRopeStringImpl(scratch4GPR)); + slowCases.append(jit.branchTest32(CCallHelpers::Zero, CCallHelpers::Address(scratch4GPR, StringImpl::flagsOffset()), CCallHelpers::TrustedImm32(StringImpl::flagIsAtom()))); - slowCases.append(jit.storeMegamorphicProperty(vm, baseGPR, scratch5GPR, nullptr, valueRegs.payloadGPR(), scratchGPR, scratch2GPR, scratch3GPR, scratch4GPR)); + slowCases.append(jit.storeMegamorphicProperty(vm, baseGPR, scratch4GPR, nullptr, valueRegs.payloadGPR(), scratchGPR, scratch2GPR, scratch3GPR)); allocator.restoreReusedRegistersByPopping(jit, preservedState); succeed(); @@ -1633,9 +2546,11 @@ void InlineCacheCompiler::generateImpl(AccessCase& accessCase) this->restoreLiveRegistersFromStackForCall(spillState, dontRestore); }; - jit.store32( - 
CCallHelpers::TrustedImm32(callSiteIndexForExceptionHandlingOrOriginal().bits()), - CCallHelpers::tagFor(CallFrameSlot::argumentCountIncludingThis)); + if (codeBlock->useDataIC()) { + callSiteIndexForExceptionHandlingOrOriginal(); + jit.transfer32(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfCallSiteIndex()), CCallHelpers::tagFor(CallFrameSlot::argumentCountIncludingThis)); + } else + jit.store32(CCallHelpers::TrustedImm32(callSiteIndexForExceptionHandlingOrOriginal().bits()), CCallHelpers::tagFor(CallFrameSlot::argumentCountIncludingThis)); if (accessCase.m_type == AccessCase::Getter || accessCase.m_type == AccessCase::Setter) { auto& access = accessCase.as(); @@ -1661,7 +2576,7 @@ void InlineCacheCompiler::generateImpl(AccessCase& accessCase) setSpillStateForJSCall(spillState); RELEASE_ASSERT(!access.callLinkInfo()); - auto* callLinkInfo = m_callLinkInfos.add(stubInfo.codeOrigin, codeBlock->useDataIC() ? CallLinkInfo::UseDataIC::Yes : CallLinkInfo::UseDataIC::No); + auto* callLinkInfo = m_callLinkInfos.add(stubInfo.codeOrigin, codeBlock->useDataIC() ? CallLinkInfo::UseDataIC::Yes : CallLinkInfo::UseDataIC::No, nullptr); access.m_callLinkInfo = callLinkInfo; // FIXME: If we generated a polymorphic call stub that jumped back to the getter @@ -1674,7 +2589,7 @@ void InlineCacheCompiler::generateImpl(AccessCase& accessCase) // https://bugs.webkit.org/show_bug.cgi?id=148914 callLinkInfo->disallowStubs(); - callLinkInfo->setUpCall(CallLinkInfo::Call, loadedValueGPR); + callLinkInfo->setUpCall(CallLinkInfo::Call); CCallHelpers::JumpList done; @@ -1736,27 +2651,27 @@ void InlineCacheCompiler::generateImpl(AccessCase& accessCase) virtualRegisterForArgumentIncludingThis(1).offset() * sizeof(Register))); } - auto slowCase = CallLinkInfo::emitFastPath(jit, callLinkInfo, loadedValueGPR, loadedValueGPR == GPRInfo::regT2 ? GPRInfo::regT0 : GPRInfo::regT2); - auto doneLocation = jit.label(); - - if (accessCase.m_type == AccessCase::Getter) - jit.setupResults(valueRegs); - done.append(jit.jump()); - - slowCase.link(&jit); - auto slowPathStart = jit.label(); - jit.move(loadedValueGPR, GPRInfo::regT0); + jit.move(loadedValueGPR, BaselineJITRegisters::Call::calleeJSR.payloadGPR()); #if USE(JSVALUE32_64) // We *always* know that the getter/setter, if non-null, is a cell. 
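The reworked getter/setter call goes through `CallLinkInfo::emitFastPath`, which now returns its slow-path jump list instead of taking scratch registers. Conceptually it is a monomorphic callee cache with a repatching slow path; a stand-alone analogue under that reading (hypothetical types, no actual JIT):

```cpp
#include <cstdio>

using Callee = void (*)();

struct CallLinkInfo {
    Callee cachedCallee = nullptr; // patched in by the slow path

    void call(Callee callee)
    {
        if (callee == cachedCallee) { // fast path: a single identity check
            callee();
            return;
        }
        slowPath(callee);
    }

    void slowPath(Callee callee)
    {
        cachedCallee = callee; // "repatch": next call takes the fast path
        callee();
    }
};

static void getter() { std::puts("getter"); }

int main()
{
    CallLinkInfo link;
    link.call(getter); // slow path, caches the callee
    link.call(getter); // fast path
}
```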
- jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1); + jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), BaselineJITRegisters::Call::calleeJSR.tagGPR()); #endif - jit.move(CCallHelpers::TrustedImmPtr(globalObject), GPRInfo::regT3); - callLinkInfo->emitSlowPath(vm, jit); + auto [slowCase, dispatchLabel] = CallLinkInfo::emitFastPath(jit, callLinkInfo); + auto doneLocation = jit.label(); if (accessCase.m_type == AccessCase::Getter) jit.setupResults(valueRegs); done.append(jit.jump()); + if (!slowCase.empty()) { + slowCase.link(&jit); + CallLinkInfo::emitSlowPath(vm, jit, callLinkInfo); + + if (accessCase.m_type == AccessCase::Getter) + jit.setupResults(valueRegs); + done.append(jit.jump()); + } + if (returnUndefined) { ASSERT(accessCase.m_type == AccessCase::Getter); returnUndefined.value().link(&jit); @@ -1764,16 +2679,23 @@ void InlineCacheCompiler::generateImpl(AccessCase& accessCase) } done.link(&jit); - int stackPointerOffset = (codeBlock->stackPointerOffset() * sizeof(Register)) - m_preservedReusedRegisterState.numberOfBytesPreserved - spillState.numberOfStackBytesUsedForRegisterPreservation; - jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister); + if (codeBlock->useDataIC()) { + jit.loadPtr(CCallHelpers::Address(GPRInfo::jitDataRegister, BaselineJITData::offsetOfStackOffset()), m_scratchGPR); + if (useHandlerIC()) + jit.addPtr(CCallHelpers::TrustedImm32(-(sizeof(CallerFrameAndPC) + maxFrameExtentForSlowPathCall + m_preservedReusedRegisterState.numberOfBytesPreserved + spillState.numberOfStackBytesUsedForRegisterPreservation)), m_scratchGPR); + else + jit.addPtr(CCallHelpers::TrustedImm32(-(m_preservedReusedRegisterState.numberOfBytesPreserved + spillState.numberOfStackBytesUsedForRegisterPreservation)), m_scratchGPR); + jit.addPtr(m_scratchGPR, GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister); + } else { + int stackPointerOffset = (codeBlock->stackPointerOffset() * sizeof(Register)) - m_preservedReusedRegisterState.numberOfBytesPreserved - spillState.numberOfStackBytesUsedForRegisterPreservation; + jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister); + } bool callHasReturnValue = accessCase.isGetter(); restoreLiveRegistersFromStackForCall(spillState, callHasReturnValue); jit.addLinkTask([=] (LinkBuffer& linkBuffer) { - callLinkInfo->setCodeLocations( - linkBuffer.locationOf(slowPathStart), - linkBuffer.locationOf(doneLocation)); + callLinkInfo->setCodeLocations(linkBuffer.locationOf(doneLocation)); }); } else { ASSERT(accessCase.m_type == AccessCase::CustomValueGetter || accessCase.m_type == AccessCase::CustomAccessorGetter || accessCase.m_type == AccessCase::CustomValueSetter || accessCase.m_type == AccessCase::CustomAccessorSetter); @@ -1830,22 +2752,15 @@ void InlineCacheCompiler::generateImpl(AccessCase& accessCase) } jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame); - if (Options::useJITCage()) - operationCall = jit.call(OperationPtrTag); - else - operationCall = jit.call(CustomAccessorPtrTag); - auto type = accessCase.m_type; auto customAccessor = accessCase.as().m_customAccessor; - jit.addLinkTask([=] (LinkBuffer& linkBuffer) { - if (Options::useJITCage()) { - if (type == AccessCase::CustomValueGetter || type == AccessCase::CustomAccessorGetter) - linkBuffer.link(operationCall, vmEntryCustomGetter); - else - linkBuffer.link(operationCall, vmEntryCustomSetter); - } else - 
linkBuffer.link(operationCall, customAccessor); - }); + if (Options::useJITCage()) { + if (type == AccessCase::CustomValueGetter || type == AccessCase::CustomAccessorGetter) + jit.callOperation(vmEntryCustomGetter); + else + jit.callOperation(vmEntryCustomSetter); + } else + jit.callOperation(customAccessor); if (accessCase.m_type == AccessCase::CustomValueGetter || accessCase.m_type == AccessCase::CustomAccessorGetter) jit.setupResults(valueRegs); @@ -1900,10 +2815,7 @@ void InlineCacheCompiler::generateImpl(AccessCase& accessCase) jit.setupArguments(CCallHelpers::TrustedImmPtr(&vm), scratchGPR); jit.prepareCallOperation(vm); - auto operationCall = jit.call(OperationPtrTag); - jit.addLinkTask([=] (LinkBuffer& linkBuffer) { - linkBuffer.link(operationCall, operationWriteBarrierSlowPath); - }); + jit.callOperation(operationWriteBarrierSlowPath); restoreLiveRegistersFromStackForCall(spillState); skipBarrier.link(&jit); @@ -1984,35 +2896,24 @@ void InlineCacheCompiler::generateImpl(AccessCase& accessCase) extraRegistersToPreserve.add(valueRegs, IgnoreVectors); InlineCacheCompiler::SpillState spillState = preserveLiveRegistersToStackForCall(extraRegistersToPreserve); - jit.store32( - CCallHelpers::TrustedImm32( - callSiteIndexForExceptionHandlingOrOriginal().bits()), - CCallHelpers::tagFor(CallFrameSlot::argumentCountIncludingThis)); + if (codeBlock->useDataIC()) { + callSiteIndexForExceptionHandlingOrOriginal(); + jit.transfer32(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfCallSiteIndex()), CCallHelpers::tagFor(CallFrameSlot::argumentCountIncludingThis)); + } else + jit.store32(CCallHelpers::TrustedImm32(callSiteIndexForExceptionHandlingOrOriginal().bits()), CCallHelpers::tagFor(CallFrameSlot::argumentCountIncludingThis)); jit.makeSpaceOnStackForCCall(); if (!reallocating) { jit.setupArguments(CCallHelpers::TrustedImmPtr(&vm), baseGPR); jit.prepareCallOperation(vm); - - CCallHelpers::Call operationCall = jit.call(OperationPtrTag); - jit.addLinkTask([=] (LinkBuffer& linkBuffer) { - linkBuffer.link( - operationCall, - operationReallocateButterflyToHavePropertyStorageWithInitialCapacity); - }); + jit.callOperation(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity); } else { // Handle the case where we are reallocating (i.e. the old structure/butterfly // already had out-of-line property storage). 
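Both branches here funnel into an operation that (re)allocates the object's out-of-line property storage: `operationReallocateButterflyToHavePropertyStorageWithInitialCapacity` creates it fresh, while `operationReallocateButterflyToGrowPropertyStorage` grows existing storage to `newSize`. A simplified model of that growth, assuming a plain heap array (JSC's real butterfly layout is more involved):

```cpp
#include <algorithm>
#include <cstddef>
#include <memory>

using Value = long;

struct Object {
    static constexpr size_t initialOutOfLineCapacity = 4;

    std::unique_ptr<Value[]> outOfLine; // the "butterfly" stand-in
    size_t capacity = 0;

    void ensureOutOfLineCapacity(size_t newSize)
    {
        if (!outOfLine) { // not reallocating: fresh storage, initial capacity
            capacity = std::max(initialOutOfLineCapacity, newSize);
            outOfLine = std::make_unique<Value[]>(capacity);
            return;
        }
        if (newSize <= capacity)
            return;
        // Reallocating: grow, then copy the properties already stored there.
        auto grown = std::make_unique<Value[]>(newSize);
        std::copy(outOfLine.get(), outOfLine.get() + capacity, grown.get());
        outOfLine = std::move(grown);
        capacity = newSize;
    }
};

int main()
{
    Object o;
    o.ensureOutOfLineCapacity(2);  // initial allocation path
    o.outOfLine[0] = 42;
    o.ensureOutOfLineCapacity(16); // growth path preserves existing slots
    return o.outOfLine[0] == 42 ? 0 : 1;
}
```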
jit.setupArguments(CCallHelpers::TrustedImmPtr(&vm), baseGPR, CCallHelpers::TrustedImm32(newSize / sizeof(JSValue))); jit.prepareCallOperation(vm); - - CCallHelpers::Call operationCall = jit.call(OperationPtrTag); - jit.addLinkTask([=] (LinkBuffer& linkBuffer) { - linkBuffer.link( - operationCall, - operationReallocateButterflyToGrowPropertyStorage); - }); + jit.callOperation(operationReallocateButterflyToGrowPropertyStorage); } jit.reclaimSpaceOnStackForCCall(); @@ -2079,23 +2980,12 @@ void InlineCacheCompiler::generateImpl(AccessCase& accessCase) ASSERT(accessCase.structure()->transitionWatchpointSetHasBeenInvalidated()); ASSERT(accessCase.newStructure()->transitionKind() == TransitionKind::PropertyDeletion); ASSERT(baseGPR != scratchGPR); - ASSERT(!valueRegs.uses(baseGPR)); - ASSERT(!valueRegs.uses(scratchGPR)); - - jit.moveValue(JSValue(), valueRegs); - if (isInlineOffset(accessCase.m_offset)) { - jit.storeValue( - valueRegs, - CCallHelpers::Address( - baseGPR, - JSObject::offsetOfInlineStorage() + - offsetInInlineStorage(accessCase.m_offset) * sizeof(JSValue))); - } else { + if (isInlineOffset(accessCase.m_offset)) + jit.storeTrustedValue(JSValue(), CCallHelpers::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(accessCase.m_offset) * sizeof(JSValue))); + else { jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR); - jit.storeValue( - valueRegs, - CCallHelpers::Address(scratchGPR, offsetInButterfly(accessCase.m_offset) * sizeof(JSValue))); + jit.storeTrustedValue(JSValue(), CCallHelpers::Address(scratchGPR, offsetInButterfly(accessCase.m_offset) * sizeof(JSValue))); } uint32_t structureBits = bitwise_cast(accessCase.newStructure()->id()); @@ -2164,6 +3054,11 @@ void InlineCacheCompiler::generateImpl(AccessCase& accessCase) succeed(); return; + case AccessCase::IndexedNoIndexingInMiss: + jit.moveTrustedValue(jsBoolean(false), valueRegs); + succeed(); + return; + case AccessCase::DirectArgumentsLength: case AccessCase::ScopedArgumentsLength: case AccessCase::ModuleNamespaceLoad: @@ -2173,9 +3068,11 @@ void InlineCacheCompiler::generateImpl(AccessCase& accessCase) case AccessCase::InstanceOfGeneric: case AccessCase::LoadMegamorphic: case AccessCase::StoreMegamorphic: + case AccessCase::InMegamorphic: case AccessCase::IndexedProxyObjectLoad: case AccessCase::IndexedMegamorphicLoad: case AccessCase::IndexedMegamorphicStore: + case AccessCase::IndexedMegamorphicIn: case AccessCase::IndexedInt32Load: case AccessCase::IndexedDoubleLoad: case AccessCase::IndexedContiguousLoad: @@ -2224,6 +3121,31 @@ void InlineCacheCompiler::generateImpl(AccessCase& accessCase) case AccessCase::IndexedResizableTypedArrayUint32Store: case AccessCase::IndexedResizableTypedArrayFloat32Store: case AccessCase::IndexedResizableTypedArrayFloat64Store: + case AccessCase::IndexedInt32InHit: + case AccessCase::IndexedDoubleInHit: + case AccessCase::IndexedContiguousInHit: + case AccessCase::IndexedArrayStorageInHit: + case AccessCase::IndexedScopedArgumentsInHit: + case AccessCase::IndexedDirectArgumentsInHit: + case AccessCase::IndexedTypedArrayInt8InHit: + case AccessCase::IndexedTypedArrayUint8InHit: + case AccessCase::IndexedTypedArrayUint8ClampedInHit: + case AccessCase::IndexedTypedArrayInt16InHit: + case AccessCase::IndexedTypedArrayUint16InHit: + case AccessCase::IndexedTypedArrayInt32InHit: + case AccessCase::IndexedTypedArrayUint32InHit: + case AccessCase::IndexedTypedArrayFloat32InHit: + case AccessCase::IndexedTypedArrayFloat64InHit: + case 
AccessCase::IndexedResizableTypedArrayInt8InHit: + case AccessCase::IndexedResizableTypedArrayUint8InHit: + case AccessCase::IndexedResizableTypedArrayUint8ClampedInHit: + case AccessCase::IndexedResizableTypedArrayInt16InHit: + case AccessCase::IndexedResizableTypedArrayUint16InHit: + case AccessCase::IndexedResizableTypedArrayInt32InHit: + case AccessCase::IndexedResizableTypedArrayUint32InHit: + case AccessCase::IndexedResizableTypedArrayFloat32InHit: + case AccessCase::IndexedResizableTypedArrayFloat64InHit: + case AccessCase::IndexedStringInHit: // These need to be handled by generateWithGuard(), since the guard is part of the // algorithm. We can be sure that nobody will call generate() directly for these since they // are not guarded by structure checks. @@ -2241,6 +3163,12 @@ void InlineCacheCompiler::emitDOMJITGetter(GetterSetterAccessCase& accessCase, c GPRReg baseGPR = stubInfo.m_baseGPR; GPRReg scratchGPR = m_scratchGPR; + if (jit.codeBlock()->useDataIC()) { + callSiteIndexForExceptionHandlingOrOriginal(); + jit.transfer32(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfCallSiteIndex()), CCallHelpers::tagFor(CallFrameSlot::argumentCountIncludingThis)); + } else + jit.store32(CCallHelpers::TrustedImm32(callSiteIndexForExceptionHandlingOrOriginal().bits()), CCallHelpers::tagFor(CallFrameSlot::argumentCountIncludingThis)); + // We construct the environment that can execute the DOMJIT::Snippet here. Ref snippet = domJIT->compiler()(); @@ -2385,19 +3313,21 @@ void InlineCacheCompiler::emitProxyObjectAccess(ProxyObjectAccessCase& accessCas InlineCacheCompiler::SpillState spillState = preserveLiveRegistersToStackForCall(); - jit.store32( - CCallHelpers::TrustedImm32(callSiteIndexForExceptionHandlingOrOriginal().bits()), - CCallHelpers::tagFor(CallFrameSlot::argumentCountIncludingThis)); + if (codeBlock->useDataIC()) { + callSiteIndexForExceptionHandlingOrOriginal(); + jit.transfer32(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfCallSiteIndex()), CCallHelpers::tagFor(CallFrameSlot::argumentCountIncludingThis)); + } else + jit.store32(CCallHelpers::TrustedImm32(callSiteIndexForExceptionHandlingOrOriginal().bits()), CCallHelpers::tagFor(CallFrameSlot::argumentCountIncludingThis)); setSpillStateForJSCall(spillState); ASSERT(!accessCase.callLinkInfo()); - auto* callLinkInfo = m_callLinkInfos.add(stubInfo.codeOrigin, codeBlock->useDataIC() ? CallLinkInfo::UseDataIC::Yes : CallLinkInfo::UseDataIC::No); + auto* callLinkInfo = m_callLinkInfos.add(stubInfo.codeOrigin, codeBlock->useDataIC() ? CallLinkInfo::UseDataIC::Yes : CallLinkInfo::UseDataIC::No, nullptr); accessCase.m_callLinkInfo = callLinkInfo; callLinkInfo->disallowStubs(); - callLinkInfo->setUpCall(CallLinkInfo::Call, scratchGPR); + callLinkInfo->setUpCall(CallLinkInfo::Call); unsigned numberOfParameters; JSFunction* proxyInternalMethod = nullptr; @@ -2463,31 +3393,40 @@ void InlineCacheCompiler::emitProxyObjectAccess(ProxyObjectAccessCase& accessCas jit.move(CCallHelpers::TrustedImmPtr(proxyInternalMethod), scratchGPR); jit.storeCell(scratchGPR, calleeFrame.withOffset(CallFrameSlot::callee * sizeof(Register))); - auto slowCase = CallLinkInfo::emitFastPath(jit, callLinkInfo, scratchGPR, scratchGPR == GPRInfo::regT2 ? GPRInfo::regT0 : GPRInfo::regT2); + jit.move(scratchGPR, BaselineJITRegisters::Call::calleeJSR.payloadGPR()); +#if USE(JSVALUE32_64) + // We *always* know that the proxy function, if non-null, is a cell. 
+ jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), BaselineJITRegisters::Call::calleeJSR.tagGPR()); +#endif + auto [slowCase, dispatchLabel] = CallLinkInfo::emitFastPath(jit, callLinkInfo); auto doneLocation = jit.label(); if (accessCase.m_type != AccessCase::ProxyObjectStore) jit.setupResults(valueRegs); - auto done = jit.jump(); + if (!slowCase.empty()) { + auto done = jit.jump(); - slowCase.link(&jit); - auto slowPathStart = jit.label(); - jit.move(scratchGPR, GPRInfo::regT0); -#if USE(JSVALUE32_64) - // We *always* know that the proxy function, if non-null, is a cell. - jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1); -#endif - jit.move(CCallHelpers::TrustedImmPtr(globalObject), GPRInfo::regT3); - callLinkInfo->emitSlowPath(vm, jit); + slowCase.link(&jit); + CallLinkInfo::emitSlowPath(vm, jit, callLinkInfo); - if (accessCase.m_type != AccessCase::ProxyObjectStore) - jit.setupResults(valueRegs); + if (accessCase.m_type != AccessCase::ProxyObjectStore) + jit.setupResults(valueRegs); - done.link(&jit); + done.link(&jit); + } - int stackPointerOffset = (codeBlock->stackPointerOffset() * sizeof(Register)) - m_preservedReusedRegisterState.numberOfBytesPreserved - spillState.numberOfStackBytesUsedForRegisterPreservation; - jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister); + if (codeBlock->useDataIC()) { + jit.loadPtr(CCallHelpers::Address(GPRInfo::jitDataRegister, BaselineJITData::offsetOfStackOffset()), m_scratchGPR); + if (useHandlerIC()) + jit.addPtr(CCallHelpers::TrustedImm32(-(sizeof(CallerFrameAndPC) + maxFrameExtentForSlowPathCall + m_preservedReusedRegisterState.numberOfBytesPreserved + spillState.numberOfStackBytesUsedForRegisterPreservation)), m_scratchGPR); + else + jit.addPtr(CCallHelpers::TrustedImm32(-(m_preservedReusedRegisterState.numberOfBytesPreserved + spillState.numberOfStackBytesUsedForRegisterPreservation)), m_scratchGPR); + jit.addPtr(m_scratchGPR, GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister); + } else { + int stackPointerOffset = (codeBlock->stackPointerOffset() * sizeof(Register)) - m_preservedReusedRegisterState.numberOfBytesPreserved - spillState.numberOfStackBytesUsedForRegisterPreservation; + jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister); + } RegisterSet dontRestore; if (accessCase.m_type != AccessCase::ProxyObjectStore) { @@ -2498,9 +3437,7 @@ void InlineCacheCompiler::emitProxyObjectAccess(ProxyObjectAccessCase& accessCas restoreLiveRegistersFromStackForCall(spillState, dontRestore); jit.addLinkTask([=] (LinkBuffer& linkBuffer) { - callLinkInfo->setCodeLocations( - linkBuffer.locationOf(slowPathStart), - linkBuffer.locationOf(doneLocation)); + callLinkInfo->setCodeLocations(linkBuffer.locationOf(doneLocation)); }); succeed(); } @@ -2531,6 +3468,13 @@ bool InlineCacheCompiler::canEmitIntrinsicGetter(StructureStubInfo& stubInfo, JS TypeInfo info = structure->typeInfo(); return info.isObject() && !info.overridesGetPrototype(); } + case SpeciesGetterIntrinsic: { +#if USE(JSVALUE32_64) + return false; +#else + return !structure->classInfoForCells()->isSubClassOf(JSScope::info()); +#endif + } case WebAssemblyInstanceExportsIntrinsic: return structure->typeInfo().type() == WebAssemblyInstanceType; default: @@ -2668,6 +3612,12 @@ void InlineCacheCompiler::emitIntrinsicGetter(IntrinsicGetterAccessCase& accessC return; } + case SpeciesGetterIntrinsic: { + 
jit.moveValueRegs(stubInfo.baseRegs(), valueRegs); + succeed(); + return; + } + case WebAssemblyInstanceExportsIntrinsic: { #if ENABLE(WEBASSEMBLY) jit.loadPtr(CCallHelpers::Address(baseGPR, JSWebAssemblyInstance::offsetOfModuleRecord()), valueGPR); @@ -2710,6 +3660,21 @@ static inline bool canUseMegamorphicPutFastPath(Structure* structure) } } +static inline ASCIILiteral categoryName(AccessType type) +{ + switch (type) { +#define JSC_DEFINE_ACCESS_TYPE_CASE(name) \ + case AccessType::name: \ + return #name ""_s; \ + + JSC_FOR_EACH_STRUCTURE_STUB_INFO_ACCESS_TYPE(JSC_DEFINE_ACCESS_TYPE_CASE) + +#undef JSC_DEFINE_ACCESS_TYPE_CASE + } + RELEASE_ASSERT_NOT_REACHED(); + return nullptr; +} + AccessGenerationResult InlineCacheCompiler::regenerate(const GCSafeConcurrentJSLocker& locker, PolymorphicAccess& poly, CodeBlock* codeBlock) { SuperSamplerScope superSamplerScope(false); @@ -2762,7 +3727,7 @@ AccessGenerationResult InlineCacheCompiler::regenerate(const GCSafeConcurrentJSL if (isGenerated) poly.m_list[dstIndex++] = WTFMove(someCase); } - poly.m_list.resize(dstIndex); + poly.m_list.shrink(dstIndex); bool generatedMegamorphicCode = false; @@ -2929,6 +3894,69 @@ AccessGenerationResult InlineCacheCompiler::regenerate(const GCSafeConcurrentJSL } break; } + case AccessType::InById: { + auto identifier = cases.last()->m_identifier; + bool allAreSimpleHitOrMiss = true; + for (auto& accessCase : cases) { + if (accessCase->type() != AccessCase::InHit && accessCase->type() != AccessCase::InMiss) { + allAreSimpleHitOrMiss = false; + break; + } + if (accessCase->usesPolyProto()) { + allAreSimpleHitOrMiss = false; + break; + } + if (accessCase->viaGlobalProxy()) { + allAreSimpleHitOrMiss = false; + break; + } + } + + // Currently, we do not apply megamorphic cache for "length" property since Array#length and String#length are too common. 
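The `categoryName` helper above derives a string for every access type by expanding an X-macro over `JSC_FOR_EACH_STRUCTURE_STUB_INFO_ACCESS_TYPE`, so a newly added access type can never be missing a name. The same pattern in miniature, with a hypothetical three-entry list standing in for JSC's:

```cpp
#include <cstdio>

// Hypothetical X-macro list; JSC's real one enumerates every AccessType.
#define FOR_EACH_ACCESS_TYPE(macro) \
    macro(GetById) \
    macro(PutByIdStrict) \
    macro(DeleteByValSloppy)

enum class AccessType {
#define DEFINE_ENUMERATOR(name) name,
    FOR_EACH_ACCESS_TYPE(DEFINE_ENUMERATOR)
#undef DEFINE_ENUMERATOR
};

static const char* categoryName(AccessType type)
{
    switch (type) {
#define DEFINE_CASE(name) case AccessType::name: return #name;
    FOR_EACH_ACCESS_TYPE(DEFINE_CASE)
#undef DEFINE_CASE
    }
    return nullptr;
}

int main()
{
    std::puts(categoryName(AccessType::PutByIdStrict)); // prints "PutByIdStrict"
}
```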
+ if (!canUseMegamorphicInById(vm(), identifier.uid())) + allAreSimpleHitOrMiss = false; + +#if USE(JSVALUE32_64) + allAreSimpleHitOrMiss = false; +#endif + + if (allAreSimpleHitOrMiss) { + while (!cases.isEmpty()) + poly.m_list.append(cases.takeLast()); + cases.append(AccessCase::create(vm(), codeBlock, AccessCase::InMegamorphic, identifier)); + generatedMegamorphicCode = true; + } + break; + } + case AccessType::InByVal: { + bool allAreSimpleHitOrMiss = true; + for (auto& accessCase : cases) { + if (accessCase->type() != AccessCase::InHit && accessCase->type() != AccessCase::InMiss) { + allAreSimpleHitOrMiss = false; + break; + } + if (accessCase->usesPolyProto()) { + allAreSimpleHitOrMiss = false; + break; + } + if (accessCase->viaGlobalProxy()) { + allAreSimpleHitOrMiss = false; + break; + } + } + +#if USE(JSVALUE32_64) + allAreSimpleHitOrMiss = false; +#endif + + if (allAreSimpleHitOrMiss) { + while (!cases.isEmpty()) + poly.m_list.append(cases.takeLast()); + cases.append(AccessCase::create(vm(), codeBlock, AccessCase::IndexedMegamorphicIn, nullptr)); + generatedMegamorphicCode = true; + } + break; + } default: break; } @@ -2936,12 +3964,37 @@ AccessGenerationResult InlineCacheCompiler::regenerate(const GCSafeConcurrentJSL dataLogLnIf(InlineCacheCompilerInternal::verbose, "Optimized cases: ", listDump(cases)); + auto finishCodeGeneration = [&](Ref&& stub) { + auto handler = InlineCacheHandler::create(WTFMove(stub), WTFMove(m_watchpoints)); + dataLogLnIf(InlineCacheCompilerInternal::verbose, "Returning: ", handler->callTarget()); + + poly.m_list = WTFMove(cases); + poly.m_list.shrinkToFit(); + + AccessGenerationResult::Kind resultKind; + if (generatedMegamorphicCode) + resultKind = AccessGenerationResult::GeneratedMegamorphicCode; + else if (poly.m_list.size() >= Options::maxAccessVariantListSize()) + resultKind = AccessGenerationResult::GeneratedFinalCode; + else + resultKind = AccessGenerationResult::GeneratedNewCode; + + return AccessGenerationResult(resultKind, WTFMove(handler)); + }; + + if (generatedMegamorphicCode && useHandlerIC()) { + ASSERT(codeBlock->useDataIC()); + auto stub = vm().m_sharedJITStubs->getMegamorphic(m_stubInfo->accessType); + if (stub) + return finishCodeGeneration(stub.releaseNonNull()); + } + auto allocator = makeDefaultScratchAllocator(); m_allocator = &allocator; m_scratchGPR = allocator.allocateScratchGPR(); for (auto& accessCase : cases) { - if (accessCase->needsScratchFPR()) { + if (needsScratchFPR(accessCase->m_type)) { m_scratchFPR = allocator.allocateScratchFPR(); break; } @@ -3010,30 +4063,20 @@ AccessGenerationResult InlineCacheCompiler::regenerate(const GCSafeConcurrentJSL if (needsSymbolPropertyCheck || needsStringPropertyCheck || needsInt32PropertyCheck) canBeShared = false; - auto finishCodeGeneration = [&](RefPtr&& stub) { - poly.m_stubRoutine = WTFMove(stub); - poly.m_watchpoints = WTFMove(m_watchpoints); - dataLogLnIf(InlineCacheCompilerInternal::verbose, "Returning: ", poly.m_stubRoutine->code()); - - poly.m_list = WTFMove(cases); - poly.m_list.shrinkToFit(); - - AccessGenerationResult::Kind resultKind; - if (generatedMegamorphicCode) - resultKind = AccessGenerationResult::GeneratedMegamorphicCode; - else if (poly.m_list.size() >= Options::maxAccessVariantListSize()) - resultKind = AccessGenerationResult::GeneratedFinalCode; - else - resultKind = AccessGenerationResult::GeneratedNewCode; - - return AccessGenerationResult(resultKind, poly.m_stubRoutine->code().code()); - }; - CCallHelpers jit(codeBlock); m_jit = &jit; + if (useHandlerIC()) 
+ emitDataICPrologue(*m_jit); + if (!canBeShared && ASSERT_ENABLED) { - jit.addPtr(CCallHelpers::TrustedImm32(codeBlock->stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, jit.scratchRegister()); + if (codeBlock->useDataIC()) { + jit.loadPtr(CCallHelpers::Address(GPRInfo::jitDataRegister, BaselineJITData::offsetOfStackOffset()), jit.scratchRegister()); + jit.addPtr(jit.scratchRegister(), GPRInfo::callFrameRegister, jit.scratchRegister()); + if (useHandlerIC()) + jit.addPtr(CCallHelpers::TrustedImm32(-static_cast(sizeof(CallerFrameAndPC) + maxFrameExtentForSlowPathCall)), jit.scratchRegister()); + } else + jit.addPtr(CCallHelpers::TrustedImm32(codeBlock->stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, jit.scratchRegister()); auto ok = jit.branchPtr(CCallHelpers::Equal, CCallHelpers::stackPointerRegister, jit.scratchRegister()); jit.breakpoint(); ok.link(&jit); @@ -3221,25 +4264,26 @@ AccessGenerationResult InlineCacheCompiler::regenerate(const GCSafeConcurrentJSL MacroAssembler::Label makeshiftCatchHandler = jit.label(); JIT_COMMENT(jit, "exception handler"); - int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue); - InlineCacheCompiler::SpillState spillStateForJSCall = this->spillStateForJSCall(); - ASSERT(!spillStateForJSCall.isEmpty()); - stackPointerOffset -= m_preservedReusedRegisterState.numberOfBytesPreserved; - stackPointerOffset -= spillStateForJSCall.numberOfStackBytesUsedForRegisterPreservation; - + InlineCacheCompiler::SpillState spillState = this->spillStateForJSCall(); + ASSERT(!spillState.isEmpty()); jit.loadPtr(vm().addressOfCallFrameForCatch(), GPRInfo::callFrameRegister); - jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister); + if (codeBlock->useDataIC()) { + ASSERT(!JITCode::isBaselineCode(m_jitType)); + jit.loadPtr(CCallHelpers::Address(GPRInfo::jitDataRegister, BaselineJITData::offsetOfStackOffset()), m_scratchGPR); + jit.addPtr(CCallHelpers::TrustedImm32(-(m_preservedReusedRegisterState.numberOfBytesPreserved + spillState.numberOfStackBytesUsedForRegisterPreservation)), m_scratchGPR); + jit.addPtr(m_scratchGPR, GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister); + } else { + int stackPointerOffset = (codeBlock->stackPointerOffset() * sizeof(Register)) - m_preservedReusedRegisterState.numberOfBytesPreserved - spillState.numberOfStackBytesUsedForRegisterPreservation; + jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister); + } - restoreLiveRegistersFromStackForCallWithThrownException(spillStateForJSCall); + restoreLiveRegistersFromStackForCallWithThrownException(spillState); restoreScratch(); - CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump(); - HandlerInfo oldHandler = originalExceptionHandler(); + jit.jumpThunk(oldHandler.nativeCode); DisposableCallSiteIndex newExceptionHandlingCallSite = this->callSiteIndexForExceptionHandling(); jit.addLinkTask( [=] (LinkBuffer& linkBuffer) { - linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode); - HandlerInfo handlerToRegister = oldHandler; handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler); handlerToRegister.start = newExceptionHandlingCallSite.bits(); @@ -3250,18 +4294,22 @@ AccessGenerationResult InlineCacheCompiler::regenerate(const GCSafeConcurrentJSL // We set these to indicate to the stub to remove itself from the CodeBlock's // exception handler table 
when it is deallocated. codeBlockThatOwnsExceptionHandlers = codeBlock; - ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType())); + ASSERT(JSC::JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType())); callSiteIndexForExceptionHandling = this->callSiteIndexForExceptionHandling(); } + CodeLocationLabel successLabel = m_stubInfo->doneLocation; if (codeBlock->useDataIC()) { - failure.link(&jit); - JIT_COMMENT(jit, "failure far jump"); - // In ARM64, we do not push anything on stack specially. - // So we can just jump to the slow-path even though this thunk is called (not jumped). - // FIXME: We should tail call to the thunk which calls the slow path function. - // And we should eliminate IC slow-path generation in BaselineJIT. - jit.farJump(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfSlowPathStartLocation()), JITStubRoutinePtrTag); + if (useHandlerIC()) + failure.linkThunk(CodeLocationLabel(CodePtr { (generateSlowPathCode(vm(), m_stubInfo->accessType).retaggedCode().dataLocation() + prologueSizeInBytesDataIC) }), &jit); + else { + failure.link(&jit); + JIT_COMMENT(jit, "failure far jump"); + jit.farJump(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfSlowPathStartLocation()), JITStubRoutinePtrTag); + } + } else { + m_success.linkThunk(successLabel, &jit); + failure.linkThunk(m_stubInfo->slowPathStartLocation, &jit); } RefPtr stub; @@ -3281,7 +4329,7 @@ AccessGenerationResult InlineCacheCompiler::regenerate(const GCSafeConcurrentJSL stub = vm().m_sharedJITStubs->find(searcher); if (stub) { dataLogLnIf(InlineCacheCompilerInternal::verbose, "Found existing code stub ", stub->code()); - return finishCodeGeneration(WTFMove(stub)); + return finishCodeGeneration(stub.releaseNonNull()); } } @@ -3291,33 +4339,39 @@ AccessGenerationResult InlineCacheCompiler::regenerate(const GCSafeConcurrentJSL return AccessGenerationResult::GaveUp; } - CodeLocationLabel successLabel = m_stubInfo->doneLocation; if (codeBlock->useDataIC()) ASSERT(m_success.empty()); - else { - linkBuffer.link(m_success, successLabel); - linkBuffer.link(failure, m_stubInfo->slowPathStartLocation); - } dataLogLnIf(InlineCacheCompilerInternal::verbose, FullCodeOrigin(codeBlock, m_stubInfo->codeOrigin), ": Generating polymorphic access stub for ", listDump(cases)); - MacroAssemblerCodeRef code = FINALIZE_CODE_FOR( - codeBlock, linkBuffer, JITStubRoutinePtrTag, - "%s", toCString("Access stub for ", *codeBlock, " ", m_stubInfo->codeOrigin, "with start: ", m_stubInfo->startLocation, " with return point ", successLabel, ": ", listDump(cases)).data()); + MacroAssemblerCodeRef code = FINALIZE_CODE_FOR(codeBlock, linkBuffer, JITStubRoutinePtrTag, categoryName(m_stubInfo->accessType), "%s", toCString("Access stub for ", *codeBlock, " ", m_stubInfo->codeOrigin, "with start: ", m_stubInfo->startLocation, " with return point ", successLabel, ": ", listDump(cases)).data()); + + CodeBlock* owner = codeBlock; + if (generatedMegamorphicCode && useHandlerIC()) { + ASSERT(codeBlock->useDataIC()); + ASSERT(!doesCalls); + ASSERT(cellsToMark.isEmpty()); + owner = nullptr; + } + + stub = createICJITStubRoutine(code, WTFMove(keys), WTFMove(weakStructures), vm(), owner, doesCalls, cellsToMark, WTFMove(m_callLinkInfos), codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling); - stub = createICJITStubRoutine(code, WTFMove(keys), WTFMove(weakStructures), vm(), codeBlock, doesCalls, cellsToMark, WTFMove(m_callLinkInfos), 
codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling); + if (generatedMegamorphicCode && useHandlerIC()) { + ASSERT(codeBlock->useDataIC()); + vm().m_sharedJITStubs->setMegamorphic(m_stubInfo->accessType, *stub); + } if (codeBlock->useDataIC()) { if (canBeShared) vm().m_sharedJITStubs->add(SharedJITStubSet::Hash::Key(m_stubInfo->m_baseGPR, m_stubInfo->m_valueGPR, m_stubInfo->m_extraGPR, m_stubInfo->m_extra2GPR, m_stubInfo->m_stubInfoGPR, m_stubInfo->m_arrayProfileGPR, m_stubInfo->usedRegisters, stub.get())); } - return finishCodeGeneration(WTFMove(stub)); + return finishCodeGeneration(stub.releaseNonNull()); } -PolymorphicAccess::PolymorphicAccess() { } -PolymorphicAccess::~PolymorphicAccess() { } +PolymorphicAccess::PolymorphicAccess() = default; +PolymorphicAccess::~PolymorphicAccess() = default; AccessGenerationResult PolymorphicAccess::addCases( const GCSafeConcurrentJSLocker& locker, VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, @@ -3426,13 +4480,6 @@ bool PolymorphicAccess::visitWeak(VM& vm) const if (!at(i).visitWeak(vm)) return false; } - if (m_stubRoutine) { - for (StructureID weakReference : m_stubRoutine->weakStructures()) { - Structure* structure = weakReference.decode(); - if (!vm.heap.isMarked(structure)) - return false; - } - } return true; } @@ -3464,12 +4511,26 @@ void PolymorphicAccess::dump(PrintStream& out) const out.print("]"); } -void PolymorphicAccess::aboutToDie() +void InlineCacheHandler::aboutToDie() { if (m_stubRoutine) m_stubRoutine->aboutToDie(); } +bool InlineCacheHandler::visitWeak(VM& vm) const +{ + if (!m_stubRoutine) + return true; + + for (StructureID weakReference : m_stubRoutine->weakStructures()) { + Structure* structure = weakReference.decode(); + if (!vm.heap.isMarked(structure)) + return false; + } + + return true; +} + } // namespace JSC namespace WTF { diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/InlineCacheCompiler.h b/vendor/webkit/Source/JavaScriptCore/bytecode/InlineCacheCompiler.h index 4e21251c..6af288e1 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/InlineCacheCompiler.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/InlineCacheCompiler.h @@ -32,6 +32,7 @@ #include "JSFunctionInlines.h" #include "MacroAssembler.h" #include "ScratchRegisterAllocator.h" +#include "StructureStubClearingWatchpoint.h" #include #include @@ -45,7 +46,7 @@ class CodeBlock; class PolymorphicAccess; class ProxyObjectAccessCase; class StructureStubInfo; -class WatchpointsOnStructureStubInfo; +class InlineCacheHandler; DECLARE_ALLOCATOR_WITH_HEAP_IDENTIFIER(PolymorphicAccess); @@ -74,28 +75,10 @@ class AccessGenerationResult { RELEASE_ASSERT(kind != GeneratedMegamorphicCode); } - AccessGenerationResult(Kind kind, CodePtr code) - : m_kind(kind) - , m_code(code) - { - RELEASE_ASSERT(kind == GeneratedNewCode || kind == GeneratedFinalCode || kind == GeneratedMegamorphicCode); - RELEASE_ASSERT(code); - } - - bool operator==(const AccessGenerationResult& other) const - { - return m_kind == other.m_kind && m_code == other.m_code; - } - - explicit operator bool() const - { - return *this != AccessGenerationResult(); - } + AccessGenerationResult(Kind, Ref&&); Kind kind() const { return m_kind; } - const CodePtr& code() const { return m_code; } - bool madeNoChanges() const { return m_kind == MadeNoChanges; } bool gaveUp() const { return m_kind == GaveUp; } bool buffered() const { return m_kind == Buffered; } @@ -123,9 +106,11 @@ class AccessGenerationResult { pair.first.invalidate(vm, pair.second); } + InlineCacheHandler* 
handler() const { return m_handler.get(); } + private: - Kind m_kind; - CodePtr m_code; + Kind m_kind { MadeNoChanges }; + RefPtr m_handler; Vector> m_watchpointsToFire; }; @@ -160,9 +145,37 @@ class PolymorphicAccess { // optimization to then avoid calling this method again during the fixpoint. template void propagateTransitions(Visitor&) const; - void aboutToDie(); - void dump(PrintStream& out) const; + +private: + friend class AccessCase; + friend class CodeBlock; + friend class InlineCacheCompiler; + + typedef Vector, 2> ListType; + + ListType m_list; + RefPtr m_stubRoutine; + std::unique_ptr m_watchpoints; +}; + +class InlineCacheHandler final : public RefCounted { + WTF_MAKE_NONCOPYABLE(InlineCacheHandler); + friend class InlineCacheCompiler; +public: + static ptrdiff_t offsetOfCallTarget() { return OBJECT_OFFSETOF(InlineCacheHandler, m_callTarget); } + static ptrdiff_t offsetOfJumpTarget() { return OBJECT_OFFSETOF(InlineCacheHandler, m_jumpTarget); } + static ptrdiff_t offsetOfNext() { return OBJECT_OFFSETOF(InlineCacheHandler, m_next); } + + static Ref create(Ref&& stubRoutine, std::unique_ptr&& watchpoints) + { + return adoptRef(*new InlineCacheHandler(WTFMove(stubRoutine), WTFMove(watchpoints))); + } + + CodePtr callTarget() const { return m_callTarget; } + CodePtr jumpTarget() const { return m_jumpTarget; } + + void aboutToDie(); bool containsPC(void* pc) const { if (!m_stubRoutine) @@ -172,16 +185,24 @@ class PolymorphicAccess { return m_stubRoutine->startAddress() <= pcAsInt && pcAsInt <= m_stubRoutine->endAddress(); } + // If this returns false then we are requesting a reset of the owning StructureStubInfo. + bool visitWeak(VM&) const; + + void dump(PrintStream&) const; + + static Ref createNonHandlerSlowPath(CodePtr); + private: - friend class AccessCase; - friend class CodeBlock; - friend class InlineCacheCompiler; + InlineCacheHandler() = default; + InlineCacheHandler(Ref&&, std::unique_ptr&&); - typedef Vector, 2> ListType; + static Ref createSlowPath(VM&, AccessType); - ListType m_list; + CodePtr m_callTarget; + CodePtr m_jumpTarget; RefPtr m_stubRoutine; std::unique_ptr m_watchpoints; + RefPtr m_next; }; inline bool canUseMegamorphicGetById(VM& vm, UniquedStringImpl* uid) @@ -189,19 +210,31 @@ inline bool canUseMegamorphicGetById(VM& vm, UniquedStringImpl* uid) return !parseIndex(*uid) && uid != vm.propertyNames->length && uid != vm.propertyNames->name && uid != vm.propertyNames->prototype && uid != vm.propertyNames->underscoreProto; } +inline bool canUseMegamorphicInById(VM& vm, UniquedStringImpl* uid) +{ + return canUseMegamorphicGetById(vm, uid); +} + inline bool canUseMegamorphicPutById(VM& vm, UniquedStringImpl* uid) { return !parseIndex(*uid) && uid != vm.propertyNames->underscoreProto; } +inline AccessGenerationResult::AccessGenerationResult(Kind kind, Ref&& handler) + : m_kind(kind) + , m_handler(WTFMove(handler)) +{ + RELEASE_ASSERT(kind == GeneratedNewCode || kind == GeneratedFinalCode || kind == GeneratedMegamorphicCode); +} class InlineCacheCompiler { public: - InlineCacheCompiler(VM& vm, JSGlobalObject* globalObject, ECMAMode ecmaMode, StructureStubInfo& stubInfo) + InlineCacheCompiler(JITType jitType, VM& vm, JSGlobalObject* globalObject, ECMAMode ecmaMode, StructureStubInfo& stubInfo) : m_vm(vm) , m_globalObject(globalObject) , m_stubInfo(&stubInfo) , m_ecmaMode(ecmaMode) + , m_jitType(jitType) { } @@ -234,7 +267,6 @@ class InlineCacheCompiler { const ScalarRegisterSet& liveRegistersForCall(); - CallSiteIndex callSiteIndexForExceptionHandlingOrOriginal(); 
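// (Not removed outright: a later hunk in this same header re-declares
// callSiteIndexForExceptionHandlingOrOriginal() under the new private: section, so the
// net effect of this patch is only to narrow its visibility.)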
DisposableCallSiteIndex callSiteIndexForExceptionHandling(); const HandlerInfo& originalExceptionHandler(); @@ -285,7 +317,16 @@ class InlineCacheCompiler { AccessGenerationResult regenerate(const GCSafeConcurrentJSLocker&, PolymorphicAccess&, CodeBlock*); + static MacroAssemblerCodeRef generateSlowPathCode(VM&, AccessType); + static Ref generateSlowPathHandler(VM&, AccessType); + + static void emitDataICPrologue(CCallHelpers&); + static void emitDataICEpilogue(CCallHelpers&); + + bool useHandlerIC() const; + private: + CallSiteIndex callSiteIndexForExceptionHandlingOrOriginal(); const ScalarRegisterSet& liveRegistersToPreserveAtExceptionHandlingCallSite(); void emitDOMJITGetter(GetterSetterAccessCase&, const DOMJIT::GetterSetter*, GPRReg baseForGetGPR); @@ -297,6 +338,7 @@ class InlineCacheCompiler { JSGlobalObject* const m_globalObject; StructureStubInfo* m_stubInfo { nullptr }; const ECMAMode m_ecmaMode { ECMAMode::sloppy() }; + JITType m_jitType; CCallHelpers* m_jit { nullptr }; ScratchRegisterAllocator* m_allocator { nullptr }; MacroAssembler::JumpList m_success; diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/InlineCallFrame.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/InlineCallFrame.cpp index 62198ffb..bb22d879 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/InlineCallFrame.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/InlineCallFrame.cpp @@ -31,6 +31,8 @@ namespace JSC { +DEFINE_COMPACT_ALLOCATOR_WITH_HEAP_IDENTIFIER(InlineCallFrame); + JSFunction* InlineCallFrame::calleeConstant() const { if (calleeRecovery.isConstant()) diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/InlineCallFrame.h b/vendor/webkit/Source/JavaScriptCore/bytecode/InlineCallFrame.h index 7612a8ca..f78776ed 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/InlineCallFrame.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/InlineCallFrame.h @@ -40,7 +40,11 @@ struct InlineCallFrame; class CallFrame; class JSFunction; +DECLARE_COMPACT_ALLOCATOR_WITH_HEAP_IDENTIFIER(InlineCallFrame); + struct InlineCallFrame { + WTF_MAKE_STRUCT_FAST_COMPACT_ALLOCATED_WITH_HEAP_IDENTIFIER(InlineCallFrame); + enum Kind { Call, Construct, diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h b/vendor/webkit/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h index 88c55e3b..1acbbfe1 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h @@ -25,13 +25,13 @@ #pragma once +#include "InlineCallFrame.h" + #include #include namespace JSC { -struct InlineCallFrame; - class InlineCallFrameSet : public RefCounted { public: InlineCallFrameSet(); @@ -40,13 +40,14 @@ class InlineCallFrameSet : public RefCounted { bool isEmpty() const { return m_frames.isEmpty(); } InlineCallFrame* add(); - - typedef Bag::iterator iterator; + + using FrameBag = Bag, InlineCallFrameMalloc>; + typedef FrameBag::iterator iterator; iterator begin() { return m_frames.begin(); } iterator end() { return m_frames.end(); } private: - Bag m_frames; + FrameBag m_frames; }; } // namespace JSC diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/InstanceOfStatus.h b/vendor/webkit/Source/JavaScriptCore/bytecode/InstanceOfStatus.h index c053b06e..efa4b58c 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/InstanceOfStatus.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/InstanceOfStatus.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018 Apple Inc. All rights reserved. 
+ * Copyright (C) 2018-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,6 +29,7 @@ #include "ICStatusMap.h" #include "InstanceOfVariant.h" #include "StubInfoSummary.h" +#include namespace JSC { @@ -37,7 +38,7 @@ class CodeBlock; class StructureStubInfo; class InstanceOfStatus final { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(InstanceOfStatus); public: enum State { // It's uncached so we have no information. diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/InstanceOfVariant.h b/vendor/webkit/Source/JavaScriptCore/bytecode/InstanceOfVariant.h index bdaff5a8..5bd3c698 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/InstanceOfVariant.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/InstanceOfVariant.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018 Apple Inc. All rights reserved. + * Copyright (C) 2018-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -27,13 +27,14 @@ #include "ObjectPropertyConditionSet.h" #include "StructureSet.h" +#include namespace JSC { class InstanceOfStatus; class InstanceOfVariant { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(InstanceOfVariant); public: InstanceOfVariant() = default; InstanceOfVariant(const StructureSet&, const ObjectPropertyConditionSet&, JSObject* prototype, bool isHit); diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/LineColumn.h b/vendor/webkit/Source/JavaScriptCore/bytecode/LineColumn.h new file mode 100644 index 00000000..2cdf6c82 --- /dev/null +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/LineColumn.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2024 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#pragma once + +namespace JSC { + +struct LineColumn { + bool operator==(const LineColumn& other) const + { + return line == other.line && column == other.column; + } + + unsigned line { 0 }; + unsigned column { 0 }; +}; + +} // namespace JSC + diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/LinkTimeConstant.h b/vendor/webkit/Source/JavaScriptCore/bytecode/LinkTimeConstant.h index 4f5f2b50..3c9482d1 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/LinkTimeConstant.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/LinkTimeConstant.h @@ -51,16 +51,15 @@ class JSGlobalObject; v(makeTypeError, nullptr) \ v(AggregateError, nullptr) \ v(typedArrayLength, nullptr) \ - v(typedArrayClone, nullptr) \ v(typedArrayContentType, nullptr) \ v(typedArrayGetOriginalConstructor, nullptr) \ - v(typedArraySort, nullptr) \ + v(toIntegerOrInfinity, nullptr) \ + v(toLength, nullptr) \ v(isTypedArrayView, nullptr) \ v(isSharedTypedArrayView, nullptr) \ v(isResizableOrGrowableSharedTypedArrayView, nullptr) \ v(typedArrayFromFast, nullptr) \ v(isDetached, nullptr) \ - v(typedArrayDefaultComparator, nullptr) \ v(isBoundFunction, nullptr) \ v(hasInstanceBoundFunction, nullptr) \ v(instanceOf, nullptr) \ @@ -68,7 +67,6 @@ class JSGlobalObject; v(BuiltinDescribe, nullptr) \ v(RegExp, nullptr) \ v(min, nullptr) \ - v(trunc, nullptr) \ v(Promise, nullptr) \ v(InternalPromise, nullptr) \ v(defaultPromiseThen, nullptr) \ @@ -82,6 +80,7 @@ class JSGlobalObject; v(Map, nullptr) \ v(importMapStatus, nullptr) \ v(importInRealm, nullptr) \ + v(evalFunction, nullptr) \ v(evalInRealm, nullptr) \ v(moveFunctionToRealm, nullptr) \ v(isConstructor, nullptr) \ @@ -122,6 +121,7 @@ class JSGlobalObject; v(sentinelString, nullptr) \ v(createRemoteFunction, nullptr) \ v(isRemoteFunction, nullptr) \ + v(arrayFromFast, nullptr) \ v(arraySort, nullptr) \ v(jsonParse, nullptr) \ v(jsonStringify, nullptr) \ diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/MetadataTable.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/MetadataTable.cpp index ad01cb72..51aadf8c 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/MetadataTable.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/MetadataTable.cpp @@ -70,7 +70,7 @@ void MetadataTable::destroy(MetadataTable* table) size_t MetadataTable::sizeInBytesForGC() { - return linkingData().unlinkedMetadata->sizeInBytesForGC(*this); + return unlinkedMetadata().sizeInBytesForGC(*this); } void MetadataTable::validate() const diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/MetadataTable.h b/vendor/webkit/Source/JavaScriptCore/bytecode/MetadataTable.h index 172bbee3..697dd5ea 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/MetadataTable.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/MetadataTable.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018 Apple Inc. All rights reserved. + * Copyright (C) 2018-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,6 +30,7 @@ #include "UnlinkedMetadataTable.h" #include "ValueProfile.h" #include +#include namespace JSC { @@ -40,7 +41,7 @@ class CodeBlock; // ^ // The pointer of MetadataTable points at this address. 
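// (Inferred from the accessors that follow: because the value profiles precede the table
// pointer in this layout, forEachValueProfile() and valueProfileForOffset() index
// backwards with negative offsets from valueProfilesEnd() rather than forwards from a
// separate base pointer.)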
class MetadataTable { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(MetadataTable); WTF_MAKE_NONCOPYABLE(MetadataTable); friend class LLIntOffsetsExtractor; friend class UnlinkedMetadataTable; @@ -70,7 +71,7 @@ class MetadataTable { ALWAYS_INLINE void forEachValueProfile(const Functor& func) { // We could do a checked multiply here but if it overflows we'd just not look at any value profiles so it's probably not worth it. - int lastValueProfileOffset = -linkingData().unlinkedMetadata->m_numValueProfiles; + int lastValueProfileOffset = -unlinkedMetadata().m_numValueProfiles; for (int i = -1; i >= lastValueProfileOffset; --i) func(valueProfilesEnd()[i]); } @@ -82,7 +83,7 @@ class MetadataTable { ValueProfile& valueProfileForOffset(unsigned profileOffset) { - ASSERT(profileOffset <= linkingData().unlinkedMetadata->m_numValueProfiles); + ASSERT(profileOffset <= unlinkedMetadata().m_numValueProfiles); return valueProfilesEnd()[-static_cast(profileOffset)]; } @@ -123,6 +124,8 @@ class MetadataTable { void validate() const; + UnlinkedMetadataTable& unlinkedMetadata() const { return linkingData().unlinkedMetadata.get(); } + private: MetadataTable(UnlinkedMetadataTable&); @@ -131,7 +134,7 @@ class MetadataTable { size_t totalSize() const { - return linkingData().unlinkedMetadata->m_numValueProfiles * sizeof(ValueProfile) + sizeof(UnlinkedMetadataTable::LinkingData) + getOffset(UnlinkedMetadataTable::s_offsetTableEntries - 1); + return unlinkedMetadata().m_numValueProfiles * sizeof(ValueProfile) + sizeof(UnlinkedMetadataTable::LinkingData) + getOffset(UnlinkedMetadataTable::s_offsetTableEntries - 1); } UnlinkedMetadataTable::LinkingData& linkingData() const diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp index 26a8e003..c3c54631 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp @@ -138,6 +138,18 @@ bool ObjectPropertyConditionSet::structuresEnsureValidity() const return true; } +bool ObjectPropertyConditionSet::isStillValid() const +{ + if (!isValid()) + return false; + + for (const ObjectPropertyCondition& condition : *this) { + if (!condition.isStillValid(Concurrency::ConcurrentThread)) + return false; + } + return true; +} + bool ObjectPropertyConditionSet::needImpurePropertyWatchpoint() const { for (const ObjectPropertyCondition& condition : *this) { diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h b/vendor/webkit/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h index 77efc630..ccc85596 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h @@ -132,6 +132,7 @@ class ObjectPropertyConditionSet { // invalid(). 
ObjectPropertyConditionSet mergedWith(const ObjectPropertyConditionSet& other) const; + bool isStillValid() const; bool structuresEnsureValidity() const; bool needImpurePropertyWatchpoint() const; diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/PropertyCondition.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/PropertyCondition.cpp index 18f69b54..816b44bb 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/PropertyCondition.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/PropertyCondition.cpp @@ -175,6 +175,12 @@ bool PropertyCondition::isStillValidAssumingImpurePropertyWatchpoint( return false; } + if (structure->typeInfo().overridesPut() && JSObject::mightBeSpecialProperty(structure->vm(), structure->typeInfo().type(), uid())) { + if (PropertyConditionInternal::verbose) + dataLog("Invalid because its put() override may treat ", uid(), " property as special non-structure one.\n"); + return false; + } + unsigned currentAttributes; PropertyOffset currentOffset = structure->get(structure->vm(), concurrency, uid(), currentAttributes); if (currentOffset != invalidOffset) { @@ -186,10 +192,6 @@ bool PropertyCondition::isStillValidAssumingImpurePropertyWatchpoint( } return false; } - } else if (structure->typeInfo().overridesPut() && JSObject::mightBeSpecialProperty(structure->vm(), structure->typeInfo().type(), uid())) { - if (PropertyConditionInternal::verbose) - dataLog("Invalid because its put() override may treat ", uid(), " property as special non-structure one.\n"); - return false; } else if (structure->hasNonReifiedStaticProperties()) { if (auto entry = structure->findPropertyHashEntry(uid())) { if (entry->value->attributes() & PropertyAttribute::ReadOnlyOrAccessorOrCustomAccessorOrValue) { diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/PutByStatus.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/PutByStatus.cpp index 9a444adf..44f8ed19 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/PutByStatus.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/PutByStatus.cpp @@ -62,6 +62,7 @@ PutByStatus PutByStatus::computeFromLLInt(CodeBlock* profiledBlock, BytecodeInde switch (instruction->opcodeID()) { case op_put_by_id: break; + case op_enumerator_put_by_val: case op_put_by_val: case op_put_by_val_direct: return PutByStatus(NoInformation); @@ -251,7 +252,29 @@ PutByStatus PutByStatus::computeForStubInfo(const ConcurrentJSLocker& locker, Co return PutByStatus(JSC::slowVersion(summary), *stubInfo); break; } - + + case AccessCase::CustomAccessorSetter: { + auto conditionSet = access.conditionSet(); + if (!conditionSet.isStillValid()) + continue; + + Structure* currStructure = access.hasAlternateBase() ? access.alternateBase()->structure() : access.structure(); + // For now, we only support cases which JSGlobalObject is the same to the currently profiledBlock. 
+ if (currStructure->globalObject() != profiledBlock->globalObject()) + return PutByStatus(JSC::slowVersion(summary), *stubInfo); + + auto customAccessorSetter = access.as().customAccessor(); + std::unique_ptr domAttribute; + if (access.as().domAttribute()) + domAttribute = WTF::makeUnique(*access.as().domAttribute()); + result.m_state = CustomAccessor; + + auto variant = PutByVariant::customSetter(access.identifier(), access.structure(), WTFMove(conditionSet), customAccessorSetter, WTFMove(domAttribute)); + if (!result.appendVariant(variant)) + return PutByStatus(JSC::slowVersion(summary), *stubInfo); + break; + } + case AccessCase::Setter: { Structure* structure = access.structure(); @@ -260,27 +283,27 @@ PutByStatus PutByStatus::computeForStubInfo(const ConcurrentJSLocker& locker, Co switch (complexGetStatus.kind()) { case ComplexGetStatus::ShouldSkip: continue; - + case ComplexGetStatus::TakesSlowPath: return PutByStatus(JSC::slowVersion(summary), *stubInfo); - + case ComplexGetStatus::Inlineable: { auto callLinkStatus = makeUnique(); if (CallLinkInfo* callLinkInfo = access.as().callLinkInfo()) { *callLinkStatus = CallLinkStatus::computeFor( locker, profiledBlock, *callLinkInfo, callExitSiteData); } - + auto variant = PutByVariant::setter(access.identifier(), structure, complexGetStatus.offset(), complexGetStatus.conditionSet(), WTFMove(callLinkStatus)); if (!result.appendVariant(variant)) return PutByStatus(JSC::slowVersion(summary), *stubInfo); + break; } } break; } - + case AccessCase::CustomValueSetter: - case AccessCase::CustomAccessorSetter: return PutByStatus(MakesCalls); case AccessCase::ProxyObjectStore: { @@ -455,6 +478,7 @@ bool PutByStatus::makesCalls() const case MakesCalls: case ObservedSlowPathAndMakesCalls: case Megamorphic: + case CustomAccessor: return true; case Simple: { for (unsigned i = m_variants.size(); i--;) { @@ -536,7 +560,8 @@ void PutByStatus::merge(const PutByStatus& other) return; case Simple: - if (other.m_state != Simple) + case CustomAccessor: + if (other.m_state != m_state) return mergeSlow(); for (const PutByVariant& other : other.m_variants) { @@ -569,31 +594,34 @@ void PutByStatus::filter(const StructureSet& set) void PutByStatus::dump(PrintStream& out) const { + out.print("("); switch (m_state) { case NoInformation: - out.print("(NoInformation)"); + out.print("NoInformation"); return; case Simple: - out.print("(", listDump(m_variants), ")"); - return; + out.print("Simple"); + break; + case CustomAccessor: + out.print("CustomAccessor"); + break; case Megamorphic: out.print("Megamorphic"); - return; + break; case LikelyTakesSlowPath: out.print("LikelyTakesSlowPath"); - return; + break; case ObservedTakesSlowPath: out.print("ObservedTakesSlowPath"); - return; + break; case MakesCalls: out.print("MakesCalls"); - return; + break; case ObservedSlowPathAndMakesCalls: out.print("ObservedSlowPathAndMakesCalls"); - return; + break; } - - RELEASE_ASSERT_NOT_REACHED(); + out.print(", ", listDump(m_variants), ")"); } } // namespace JSC diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/PutByStatus.h b/vendor/webkit/Source/JavaScriptCore/bytecode/PutByStatus.h index 6aa8adad..69332105 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/PutByStatus.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/PutByStatus.h @@ -53,6 +53,8 @@ class PutByStatus final { NoInformation, // It's cached as a simple store of some kind. Simple, + // It's cached for a custom accessor with a possible structure chain. 
+ CustomAccessor, // It's cached for a megamorphic case. Megamorphic, // It will likely take the slow path. @@ -111,10 +113,12 @@ class PutByStatus final { bool isSet() const { return m_state != NoInformation; } bool operator!() const { return m_state == NoInformation; } bool isSimple() const { return m_state == Simple; } + bool isCustomAccessor() const { return m_state == CustomAccessor; } bool isMegamorphic() const { return m_state == Megamorphic; } bool takesSlowPath() const { switch (m_state) { + case CustomAccessor: case Megamorphic: case LikelyTakesSlowPath: case ObservedTakesSlowPath: diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/PutByVariant.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/PutByVariant.cpp index dee2d71d..e7e82174 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/PutByVariant.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/PutByVariant.cpp @@ -49,6 +49,11 @@ PutByVariant& PutByVariant::operator=(const PutByVariant& other) m_callLinkStatus = makeUnique(*other.m_callLinkStatus); else m_callLinkStatus = nullptr; + m_customAccessorSetter = other.m_customAccessorSetter; + if (other.m_domAttribute) + m_domAttribute = WTF::makeUnique(*other.m_domAttribute); + else + m_domAttribute = nullptr; m_identifier = other.m_identifier; return *this; } @@ -84,6 +89,18 @@ PutByVariant PutByVariant::setter(CacheableIdentifier identifier, const Structur return result; } +PutByVariant PutByVariant::customSetter(CacheableIdentifier identifier, const StructureSet& structure, const ObjectPropertyConditionSet& conditionSet, CodePtr customAccessorSetter, std::unique_ptr&& domAttribute) +{ + PutByVariant result(WTFMove(identifier)); + result.m_kind = CustomAccessorSetter; + result.m_oldStructure = structure; + result.m_conditionSet = conditionSet; + result.m_offset = invalidOffset; + result.m_customAccessorSetter = customAccessorSetter; + result.m_domAttribute = WTFMove(domAttribute); + return result; +} + PutByVariant PutByVariant::proxy(CacheableIdentifier identifier, const StructureSet& structure, std::unique_ptr callLinkStatus) { PutByVariant result(WTFMove(identifier)); @@ -130,6 +147,7 @@ bool PutByVariant::writesStructures() const switch (kind()) { case Transition: case Setter: + case CustomAccessorSetter: case Proxy: return true; default: @@ -143,6 +161,7 @@ bool PutByVariant::reallocatesStorage() const case Transition: return oldStructureForTransition()->outOfLineCapacity() != newStructure()->outOfLineCapacity(); case Setter: + case CustomAccessorSetter: case Proxy: return true; default: @@ -152,7 +171,7 @@ bool PutByVariant::reallocatesStorage() const bool PutByVariant::makesCalls() const { - return kind() == Setter || kind() == Proxy; + return kind() == Setter || kind() == CustomAccessorSetter || kind() == Proxy; } bool PutByVariant::attemptToMerge(const PutByVariant& other) @@ -207,6 +226,9 @@ bool PutByVariant::attemptToMerge(const PutByVariant& other) if (m_newStructure != other.m_newStructure) return false; + if (m_conditionSet.isEmpty() != other.m_conditionSet.isEmpty()) + return false; + ObjectPropertyConditionSet mergedConditionSet; if (!m_conditionSet.isEmpty()) { mergedConditionSet = m_conditionSet.mergedWith(other.m_conditionSet); @@ -248,6 +270,35 @@ bool PutByVariant::attemptToMerge(const PutByVariant& other) return true; } + case CustomAccessorSetter: { + if (other.m_kind != CustomAccessorSetter) + return false; + + if (m_customAccessorSetter != other.m_customAccessorSetter) + return false; + + if (m_domAttribute || other.m_domAttribute) { 
+ if (!(m_domAttribute && other.m_domAttribute)) + return false; + if (*m_domAttribute != *other.m_domAttribute) + return false; + } + + if (m_conditionSet.isEmpty() != other.m_conditionSet.isEmpty()) + return false; + + ObjectPropertyConditionSet mergedConditionSet; + if (!m_conditionSet.isEmpty()) { + mergedConditionSet = m_conditionSet.mergedWith(other.m_conditionSet); + if (!mergedConditionSet.isValid() || !mergedConditionSet.hasOneSlotBaseCondition()) + return false; + } + m_conditionSet = mergedConditionSet; + + m_oldStructure.merge(other.m_oldStructure); + return true; + } + case Proxy: { if (other.m_kind != Proxy) return false; @@ -359,6 +410,13 @@ void PutByVariant::dumpInContext(PrintStream& out, DumpContext* context) const out.print(">"); return; + case CustomAccessorSetter: + out.print( + "CustomAccessorSetter: ", inContext(structure(), context), ", [", + inContext(m_conditionSet, context), "]"); + out.print(">"); + return; + case Proxy: out.print( "Proxy: ", inContext(structure(), context)); diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/PutByVariant.h b/vendor/webkit/Source/JavaScriptCore/bytecode/PutByVariant.h index de3df3cd..5105962f 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/PutByVariant.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/PutByVariant.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2014-2021 Apple Inc. All rights reserved. + * Copyright (C) 2014-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,22 +26,23 @@ #pragma once #include "CacheableIdentifier.h" +#include "CallLinkStatus.h" #include "ObjectPropertyConditionSet.h" #include "PropertyOffset.h" #include "StructureSet.h" +#include namespace JSC { -class CallLinkStatus; - class PutByVariant { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(PutByVariant); public: enum Kind { NotSet, Replace, Transition, Setter, + CustomAccessorSetter, Proxy, }; @@ -62,6 +63,8 @@ class PutByVariant { static PutByVariant setter(CacheableIdentifier, const StructureSet&, PropertyOffset, const ObjectPropertyConditionSet&, std::unique_ptr); + static PutByVariant customSetter(CacheableIdentifier, const StructureSet&, const ObjectPropertyConditionSet&, CodePtr, std::unique_ptr&&); + static PutByVariant proxy(CacheableIdentifier, const StructureSet&, std::unique_ptr); Kind kind() const { return m_kind; } @@ -71,13 +74,13 @@ class PutByVariant { const StructureSet& structure() const { - ASSERT(kind() == Replace || kind() == Setter || kind() == Proxy); + ASSERT(kind() == Replace || kind() == Setter || kind() == Proxy || kind() == CustomAccessorSetter); return m_oldStructure; } const StructureSet& oldStructure() const { - ASSERT(kind() == Transition || kind() == Replace || kind() == Setter || kind() == Proxy); + ASSERT(kind() == Transition || kind() == Replace || kind() == Setter || kind() == CustomAccessorSetter || kind() == Proxy); return m_oldStructure; } @@ -88,7 +91,7 @@ class PutByVariant { StructureSet& oldStructure() { - ASSERT(kind() == Transition || kind() == Replace || kind() == Setter || kind() == Proxy); + ASSERT(kind() == Transition || kind() == Replace || kind() == Setter || kind() == CustomAccessorSetter || kind() == Proxy); return m_oldStructure; } @@ -153,15 +156,20 @@ class PutByVariant { return structureSet().overlaps(other.structureSet()); } + CodePtr customAccessorSetter() const { return m_customAccessorSetter; } + DOMAttributeAnnotation* domAttribute() 
const { return m_domAttribute.get(); } + private: bool attemptToMergeTransitionWithReplace(const PutByVariant& replace); Kind m_kind; - PropertyOffset m_offset; + PropertyOffset m_offset { invalidOffset }; StructureSet m_oldStructure; Structure* m_newStructure { nullptr }; ObjectPropertyConditionSet m_conditionSet; std::unique_ptr m_callLinkStatus; + CodePtr m_customAccessorSetter; + std::unique_ptr m_domAttribute; CacheableIdentifier m_identifier; }; diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/Repatch.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/Repatch.cpp index 57039c85..87e8ad9e 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/Repatch.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/Repatch.cpp @@ -49,12 +49,14 @@ #include "IntrinsicGetterAccessCase.h" #include "JIT.h" #include "JITInlines.h" +#include "JITThunks.h" #include "JSCInlines.h" #include "JSModuleNamespaceObject.h" #include "JSWebAssembly.h" #include "JSWebAssemblyInstance.h" #include "JSWebAssemblyModule.h" #include "LinkBuffer.h" +#include "MaxFrameExtentForSlowPathCall.h" #include "ModuleNamespaceAccessCase.h" #include "ProxyObjectAccessCase.h" #include "ScopedArguments.h" @@ -96,35 +98,17 @@ static ECMAMode ecmaModeFor(PutByKind putByKind) RELEASE_ASSERT_NOT_REACHED(); } -#endif // ENABLE(JIT) - -static void linkSlowPathTo(VM&, CallLinkInfo& callLinkInfo, MacroAssemblerCodeRef codeRef) -{ - callLinkInfo.setSlowPathCallDestination(codeRef.code().template retagged()); -} - -#if ENABLE(JIT) -static void linkSlowPathTo(VM& vm, CallLinkInfo& callLinkInfo, ThunkGenerator generator) -{ - linkSlowPathTo(vm, callLinkInfo, vm.getCTIStub(generator).retagged()); -} -#endif - static void linkSlowFor(VM& vm, CallLinkInfo& callLinkInfo) { - MacroAssemblerCodeRef virtualThunk = vm.getCTIVirtualCall(callLinkInfo.callMode()); - linkSlowPathTo(vm, callLinkInfo, virtualThunk); + if (callLinkInfo.type() == CallLinkInfo::Type::Optimizing) + static_cast(callLinkInfo).setSlowPathCallDestination(vm.getCTIVirtualCall(callLinkInfo.callMode()).code().template retagged()); } +#endif -void linkMonomorphicCall( - VM& vm, CallFrame* callFrame, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock, - JSObject* callee, CodePtr codePtr) +void linkMonomorphicCall(VM& vm, JSCell* owner, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock, JSObject* callee, CodePtr codePtr) { ASSERT(!callLinkInfo.stub()); - CallFrame* callerFrame = callFrame->callerFrame(); - - JSCell* owner = callerFrame->codeOwnerCell(); CodeBlock* callerCodeBlock = jsDynamicCast(owner); // WebAssembly -> JS stubs don't have a valid CodeBlock. 
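// (Note the new signature above: the owner cell is now passed in directly instead of
// being recovered via callFrame->callerFrame()->codeOwnerCell(), which is presumably
// what lets WebAssembly -> JS stubs, which own a cell but no CodeBlock, share this
// path unchanged.)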
ASSERT(owner); @@ -136,46 +120,13 @@ void linkMonomorphicCall( dataLog("Linking call in ", FullCodeOrigin(callerCodeBlock, callLinkInfo.codeOrigin()), " to ", pointerDump(calleeCodeBlock), ", entrypoint at ", codePtr, "\n"); if (calleeCodeBlock) - calleeCodeBlock->linkIncomingCall(callerFrame, &callLinkInfo); + calleeCodeBlock->linkIncomingCall(owner, &callLinkInfo); #if ENABLE(JIT) - if (callLinkInfo.specializationKind() == CodeForCall && callLinkInfo.allowStubs()) { - linkSlowPathTo(vm, callLinkInfo, linkPolymorphicCallThunkGenerator); + if (callLinkInfo.specializationKind() == CodeForCall && callLinkInfo.allowStubs()) return; - } -#endif - linkSlowFor(vm, callLinkInfo); -} - -static void revertCall(VM& vm, CallLinkInfo& callLinkInfo, MacroAssemblerCodeRef codeRef) -{ - if (callLinkInfo.isDirect()) { -#if ENABLE(JIT) - callLinkInfo.clearCodeBlock(); - static_cast(callLinkInfo).initializeDirectCall(); #endif - } else { - linkSlowPathTo(vm, callLinkInfo, codeRef); - - if (callLinkInfo.stub()) - callLinkInfo.revertCallToStub(); - callLinkInfo.clearCallee(); // This also clears the inline cache both for data and code-based caches. - } - callLinkInfo.clearSeen(); - callLinkInfo.clearStub(); - if (callLinkInfo.isOnList()) - callLinkInfo.remove(); -} - -void unlinkCall(VM& vm, CallLinkInfo& callLinkInfo) -{ - dataLogLnIf(Options::dumpDisassembly(), "Unlinking CallLinkInfo: ", RawPointer(&callLinkInfo)); - - if (UNLIKELY(!Options::useLLIntICs() && callLinkInfo.type() == CallLinkInfo::Type::Baseline)) - revertCall(vm, callLinkInfo, vm.getCTIVirtualCall(callLinkInfo.callMode())); - else - revertCall(vm, callLinkInfo, vm.getCTILinkCall().retagged()); } CodePtr jsToWasmICCodePtr(CodeSpecializationKind kind, JSObject* callee) @@ -429,7 +380,7 @@ static InlineCacheAction tryCacheGetBy(JSGlobalObject* globalObject, CodeBlock* LOG_IC((ICEvent::GetBySelfPatch, structure->classInfoForCells(), Identifier::fromUid(vm, propertyName.uid()), slot.slotBase() == baseValue)); structure->startWatchingPropertyForReplacements(vm, slot.cachedOffset()); repatchSlowPathCall(codeBlock, stubInfo, appropriateGetByOptimizeFunction(kind)); - stubInfo.initGetByIdSelf(locker, codeBlock, structure, slot.cachedOffset(), propertyName); + stubInfo.initGetByIdSelf(locker, codeBlock, structure, slot.cachedOffset()); return RetryCacheLater; } } @@ -567,23 +518,7 @@ static InlineCacheAction tryCacheGetBy(JSGlobalObject* globalObject, CodeBlock* if (result.generatedSomeCode()) { LOG_IC((ICEvent::GetByReplaceWithJump, baseValue.classInfoOrNull(), Identifier::fromUid(vm, propertyName.uid()), slot.slotBase() == baseValue)); - - RELEASE_ASSERT(result.code()); - switch (kind) { - case GetByKind::ById: - case GetByKind::ByIdWithThis: - case GetByKind::TryById: - case GetByKind::ByIdDirect: - case GetByKind::PrivateNameById: - InlineAccess::rewireStubAsJumpInAccess(codeBlock, stubInfo, CodeLocationLabel(result.code())); - break; - case GetByKind::ByVal: - case GetByKind::ByValWithThis: - case GetByKind::PrivateName: - InlineAccess::rewireStubAsJumpInAccessNotUsingInlineAccess(codeBlock, stubInfo, CodeLocationLabel(result.code())); - break; - } - + InlineAccess::rewireStubAsJumpInAccess(codeBlock, stubInfo, *result.handler()); } } @@ -752,9 +687,7 @@ static InlineCacheAction tryCacheArrayGetByVal(JSGlobalObject* globalObject, Cod if (result.generatedSomeCode()) { LOG_IC((ICEvent::GetByReplaceWithJump, baseValue.classInfoOrNull(), Identifier())); - - RELEASE_ASSERT(result.code()); - 
InlineAccess::rewireStubAsJumpInAccessNotUsingInlineAccess(codeBlock, stubInfo, CodeLocationLabel(result.code())); + InlineAccess::rewireStubAsJumpInAccess(codeBlock, stubInfo, *result.handler()); } } @@ -930,7 +863,7 @@ static InlineCacheAction tryCachePutBy(JSGlobalObject* globalObject, CodeBlock* if (generatedCodeInline) { LOG_IC((ICEvent::PutBySelfPatch, oldStructure->classInfoForCells(), ident, slot.base() == baseValue)); repatchSlowPathCall(codeBlock, stubInfo, appropriatePutByOptimizeFunction(putByKind)); - stubInfo.initPutByIdReplace(locker, codeBlock, oldStructure, slot.cachedOffset(), propertyName); + stubInfo.initPutByIdReplace(locker, codeBlock, oldStructure, slot.cachedOffset()); return RetryCacheLater; } } @@ -1090,10 +1023,7 @@ static InlineCacheAction tryCachePutBy(JSGlobalObject* globalObject, CodeBlock* if (result.generatedSomeCode()) { LOG_IC((ICEvent::PutByReplaceWithJump, oldStructure->classInfoForCells(), ident, slot.base() == baseValue)); - - RELEASE_ASSERT(result.code()); - - InlineAccess::rewireStubAsJumpInAccess(codeBlock, stubInfo, CodeLocationLabel(result.code())); + InlineAccess::rewireStubAsJumpInAccess(codeBlock, stubInfo, *result.handler()); } } @@ -1221,9 +1151,7 @@ static InlineCacheAction tryCacheArrayPutByVal(JSGlobalObject* globalObject, Cod if (result.generatedSomeCode()) { LOG_IC((ICEvent::PutByReplaceWithJump, baseValue.classInfoOrNull(), Identifier())); - - RELEASE_ASSERT(result.code()); - InlineAccess::rewireStubAsJumpInAccessNotUsingInlineAccess(codeBlock, stubInfo, CodeLocationLabel(result.code())); + InlineAccess::rewireStubAsJumpInAccess(codeBlock, stubInfo, *result.handler()); } } @@ -1297,9 +1225,8 @@ static InlineCacheAction tryCacheDeleteBy(JSGlobalObject* globalObject, CodeBloc result = stubInfo.addAccessCase(locker, globalObject, codeBlock, ecmaMode, propertyName, WTFMove(newCase)); if (result.generatedSomeCode()) { - RELEASE_ASSERT(result.code()); LOG_IC((ICEvent::DelByReplaceWithJump, oldStructure->classInfoForCells(), Identifier::fromUid(vm, propertyName.uid()))); - InlineAccess::rewireStubAsJumpInAccessNotUsingInlineAccess(codeBlock, stubInfo, CodeLocationLabel(result.code())); + InlineAccess::rewireStubAsJumpInAccess(codeBlock, stubInfo, *result.handler()); } } @@ -1314,10 +1241,20 @@ void repatchDeleteBy(JSGlobalObject* globalObject, CodeBlock* codeBlock, DeleteP if (tryCacheDeleteBy(globalObject, codeBlock, slot, baseValue, oldStructure, propertyName, stubInfo, kind, ecmaMode) == GiveUpOnCache) { LOG_IC((ICEvent::DelByReplaceWithGeneric, baseValue.classInfoOrNull(), Identifier::fromUid(globalObject->vm(), propertyName.uid()))); - if (kind == DelByKind::ById) - repatchSlowPathCall(codeBlock, stubInfo, operationDeleteByIdGaveUp); - else - repatchSlowPathCall(codeBlock, stubInfo, operationDeleteByValGaveUp); + switch (kind) { + case DelByKind::ByIdStrict: + repatchSlowPathCall(codeBlock, stubInfo, operationDeleteByIdStrictGaveUp); + break; + case DelByKind::ByIdSloppy: + repatchSlowPathCall(codeBlock, stubInfo, operationDeleteByIdSloppyGaveUp); + break; + case DelByKind::ByValStrict: + repatchSlowPathCall(codeBlock, stubInfo, operationDeleteByValStrictGaveUp); + break; + case DelByKind::ByValSloppy: + repatchSlowPathCall(codeBlock, stubInfo, operationDeleteByValSloppyGaveUp); + break; + } } } @@ -1347,6 +1284,13 @@ inline CodePtr appropriateInByGaveUpFunction(InByKind kind) RELEASE_ASSERT_NOT_REACHED(); } +// Mainly used to transition from megamorphic case to generic case. 
+void repatchInBySlowPathCall(CodeBlock* codeBlock, StructureStubInfo& stubInfo, InByKind kind) +{ + resetInBy(codeBlock, stubInfo, kind); + repatchSlowPathCall(codeBlock, stubInfo, appropriateInByGaveUpFunction(kind)); +} + static InlineCacheAction tryCacheInBy( JSGlobalObject* globalObject, CodeBlock* codeBlock, JSObject* base, CacheableIdentifier propertyName, bool wasFound, const PropertySlot& slot, StructureStubInfo& stubInfo, InByKind kind) @@ -1390,7 +1334,7 @@ static InlineCacheAction tryCacheInBy( LOG_IC((ICEvent::InBySelfPatch, structure->classInfoForCells(), ident, slot.slotBase() == base)); structure->startWatchingPropertyForReplacements(vm, slot.cachedOffset()); repatchSlowPathCall(codeBlock, stubInfo, operationInByIdOptimize); - stubInfo.initInByIdSelf(locker, codeBlock, structure, slot.cachedOffset(), propertyName); + stubInfo.initInByIdSelf(locker, codeBlock, structure, slot.cachedOffset()); return RetryCacheLater; } } @@ -1446,17 +1390,15 @@ static InlineCacheAction tryCacheInBy( if (result.generatedSomeCode()) { LOG_IC((ICEvent::InReplaceWithJump, structure->classInfoForCells(), ident, slot.slotBase() == base)); - - RELEASE_ASSERT(result.code()); - if (kind == InByKind::ById) - InlineAccess::rewireStubAsJumpInAccess(codeBlock, stubInfo, CodeLocationLabel(result.code())); - else - InlineAccess::rewireStubAsJumpInAccessNotUsingInlineAccess(codeBlock, stubInfo, CodeLocationLabel(result.code())); + InlineAccess::rewireStubAsJumpInAccess(codeBlock, stubInfo, *result.handler()); } } fireWatchpointsAndClearStubIfNeeded(vm, stubInfo, codeBlock, result); - + + if (result.generatedMegamorphicCode()) + return PromoteToMegamorphic; + return result.shouldGiveUpNow() ? GiveUpOnCache : RetryCacheLater; } @@ -1464,9 +1406,28 @@ void repatchInBy(JSGlobalObject* globalObject, CodeBlock* codeBlock, JSObject* b { SuperSamplerScope superSamplerScope(false); - if (tryCacheInBy(globalObject, codeBlock, baseObject, propertyName, wasFound, slot, stubInfo, kind) == GiveUpOnCache) { + switch (tryCacheInBy(globalObject, codeBlock, baseObject, propertyName, wasFound, slot, stubInfo, kind)) { + case PromoteToMegamorphic: { + switch (kind) { + case InByKind::ById: + repatchSlowPathCall(codeBlock, stubInfo, operationInByIdMegamorphic); + break; + case InByKind::ByVal: + repatchSlowPathCall(codeBlock, stubInfo, operationInByValMegamorphic); + break; + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } + break; + } + case GiveUpOnCache: LOG_IC((ICEvent::InReplaceWithGeneric, baseObject->classInfo(), Identifier::fromUid(globalObject->vm(), propertyName.uid()))); repatchSlowPathCall(codeBlock, stubInfo, appropriateInByGaveUpFunction(kind)); + break; + case RetryCacheLater: + case AttemptToCache: + break; } } @@ -1496,9 +1457,7 @@ static InlineCacheAction tryCacheHasPrivateBrand(JSGlobalObject* globalObject, C if (result.generatedSomeCode()) { LOG_IC((ICEvent::InReplaceWithJump, structure->classInfoForCells(), ident, isBaseProperty)); - - RELEASE_ASSERT(result.code()); - InlineAccess::rewireStubAsJumpInAccessNotUsingInlineAccess(codeBlock, stubInfo, CodeLocationLabel(result.code())); + InlineAccess::rewireStubAsJumpInAccess(codeBlock, stubInfo, *result.handler()); } } @@ -1543,9 +1502,7 @@ static InlineCacheAction tryCacheCheckPrivateBrand( if (result.generatedSomeCode()) { LOG_IC((ICEvent::CheckPrivateBrandReplaceWithJump, structure->classInfoForCells(), ident, isBaseProperty)); - - RELEASE_ASSERT(result.code()); - InlineAccess::rewireStubAsJumpInAccessNotUsingInlineAccess(codeBlock, stubInfo, 
CodeLocationLabel(result.code())); + InlineAccess::rewireStubAsJumpInAccess(codeBlock, stubInfo, *result.handler()); } } @@ -1602,9 +1559,7 @@ static InlineCacheAction tryCacheSetPrivateBrand( if (result.generatedSomeCode()) { LOG_IC((ICEvent::SetPrivateBrandReplaceWithJump, oldStructure->classInfoForCells(), ident, isBaseProperty)); - - RELEASE_ASSERT(result.code()); - InlineAccess::rewireStubAsJumpInAccessNotUsingInlineAccess(codeBlock, stubInfo, CodeLocationLabel(result.code())); + InlineAccess::rewireStubAsJumpInAccess(codeBlock, stubInfo, *result.handler()); } } @@ -1670,9 +1625,7 @@ static InlineCacheAction tryCacheInstanceOf( if (result.generatedSomeCode()) { LOG_IC((ICEvent::InstanceOfReplaceWithJump, structure->classInfoForCells(), Identifier())); - - RELEASE_ASSERT(result.code()); - InlineAccess::rewireStubAsJumpInAccessNotUsingInlineAccess(codeBlock, stubInfo, CodeLocationLabel(result.code())); + InlineAccess::rewireStubAsJumpInAccess(codeBlock, stubInfo, *result.handler()); } } @@ -1681,6 +1634,134 @@ static InlineCacheAction tryCacheInstanceOf( return result.shouldGiveUpNow() ? GiveUpOnCache : RetryCacheLater; } +static InlineCacheAction tryCacheArrayInByVal(JSGlobalObject* globalObject, CodeBlock* codeBlock, JSValue baseValue, JSValue index, StructureStubInfo& stubInfo) +{ + ASSERT(baseValue.isCell()); + + if (!index.isInt32()) + return RetryCacheLater; + + VM& vm = globalObject->vm(); + AccessGenerationResult result; + + { + GCSafeConcurrentJSLocker locker(codeBlock->m_lock, globalObject->vm()); + + JSCell* base = baseValue.asCell(); + + RefPtr newCase; + AccessCase::AccessType accessType = AccessCase::IndexedInt32InHit; + if (base->type() == DirectArgumentsType) + accessType = AccessCase::IndexedDirectArgumentsInHit; + else if (base->type() == ScopedArgumentsType) + accessType = AccessCase::IndexedScopedArgumentsInHit; + else if (base->type() == StringType) + accessType = AccessCase::IndexedStringInHit; + else if (isTypedView(base->type())) { + auto* typedArray = jsCast(base); +#if USE(JSVALUE32_64) + if (typedArray->isResizableOrGrowableShared()) + return GiveUpOnCache; +#endif + switch (typedArray->type()) { + case Int8ArrayType: + accessType = typedArray->isResizableOrGrowableShared() ? AccessCase::IndexedResizableTypedArrayInt8InHit : AccessCase::IndexedTypedArrayInt8InHit; + break; + case Uint8ArrayType: + accessType = typedArray->isResizableOrGrowableShared() ? AccessCase::IndexedResizableTypedArrayUint8InHit : AccessCase::IndexedTypedArrayUint8InHit; + break; + case Uint8ClampedArrayType: + accessType = typedArray->isResizableOrGrowableShared() ? AccessCase::IndexedResizableTypedArrayUint8ClampedInHit : AccessCase::IndexedTypedArrayUint8ClampedInHit; + break; + case Int16ArrayType: + accessType = typedArray->isResizableOrGrowableShared() ? AccessCase::IndexedResizableTypedArrayInt16InHit : AccessCase::IndexedTypedArrayInt16InHit; + break; + case Uint16ArrayType: + accessType = typedArray->isResizableOrGrowableShared() ? AccessCase::IndexedResizableTypedArrayUint16InHit : AccessCase::IndexedTypedArrayUint16InHit; + break; + case Int32ArrayType: + accessType = typedArray->isResizableOrGrowableShared() ? AccessCase::IndexedResizableTypedArrayInt32InHit : AccessCase::IndexedTypedArrayInt32InHit; + break; + case Uint32ArrayType: + accessType = typedArray->isResizableOrGrowableShared() ? AccessCase::IndexedResizableTypedArrayUint32InHit : AccessCase::IndexedTypedArrayUint32InHit; + break; + case Float32ArrayType: + accessType = typedArray->isResizableOrGrowableShared() ? 
AccessCase::IndexedResizableTypedArrayFloat32InHit : AccessCase::IndexedTypedArrayFloat32InHit; + break; + case Float64ArrayType: + accessType = typedArray->isResizableOrGrowableShared() ? AccessCase::IndexedResizableTypedArrayFloat64InHit : AccessCase::IndexedTypedArrayFloat64InHit; + break; + // FIXME: Optimize BigInt64Array / BigUint64Array in IC + // https://bugs.webkit.org/show_bug.cgi?id=221183 + case BigInt64ArrayType: + case BigUint64ArrayType: + return GiveUpOnCache; + default: + RELEASE_ASSERT_NOT_REACHED(); + } + } else { + IndexingType indexingShape = base->indexingType() & IndexingShapeMask; + switch (indexingShape) { + case Int32Shape: + accessType = AccessCase::IndexedInt32InHit; + break; + case DoubleShape: + ASSERT(Options::allowDoubleShape()); + accessType = AccessCase::IndexedDoubleInHit; + break; + case ContiguousShape: + accessType = AccessCase::IndexedContiguousInHit; + break; + case ArrayStorageShape: + accessType = AccessCase::IndexedArrayStorageInHit; + break; + case NoIndexingShape: { + if (!base->isObject()) + return GiveUpOnCache; + + if (base->structure()->mayInterceptIndexedAccesses() || base->structure()->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) + return GiveUpOnCache; + + // FIXME: prepareChainForCaching is conservative. We should have another function which only cares about information related to this IC. + auto cacheStatus = prepareChainForCaching(globalObject, base, nullptr, nullptr); + if (!cacheStatus) + return GiveUpOnCache; + + if (cacheStatus->usesPolyProto) + return GiveUpOnCache; + + Structure* headStructure = base->structure(); + ObjectPropertyConditionSet conditionSet = generateConditionsForIndexedMiss(vm, codeBlock, globalObject, headStructure); + if (!conditionSet.isValid()) + return GiveUpOnCache; + + newCase = AccessCase::create(vm, codeBlock, AccessCase::IndexedNoIndexingInMiss, nullptr, invalidOffset, headStructure, conditionSet); + break; + } + default: + return GiveUpOnCache; + } + } + + if (!newCase) + newCase = AccessCase::create(vm, codeBlock, accessType, nullptr); + + result = stubInfo.addAccessCase(locker, globalObject, codeBlock, ECMAMode::strict(), nullptr, newCase.releaseNonNull()); + + if (result.generatedSomeCode()) + InlineAccess::rewireStubAsJumpInAccess(codeBlock, stubInfo, *result.handler()); + } + + fireWatchpointsAndClearStubIfNeeded(vm, stubInfo, codeBlock, result); + return result.shouldGiveUpNow() ? GiveUpOnCache : RetryCacheLater; +} + +void repatchArrayInByVal(JSGlobalObject* globalObject, CodeBlock* codeBlock, JSValue base, JSValue index, StructureStubInfo& stubInfo, InByKind kind) +{ + if (tryCacheArrayInByVal(globalObject, codeBlock, base, index, stubInfo) == GiveUpOnCache) + repatchSlowPathCall(codeBlock, stubInfo, appropriateInByGaveUpFunction(kind)); +} + void repatchInstanceOf( JSGlobalObject* globalObject, CodeBlock* codeBlock, JSValue valueValue, JSValue prototypeValue, StructureStubInfo& stubInfo, bool wasFound) @@ -1690,67 +1771,27 @@ void repatchInstanceOf( repatchSlowPathCall(codeBlock, stubInfo, operationInstanceOfGaveUp); } -void linkDirectCall( - CallFrame* callFrame, OptimizingCallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock, - CodePtr codePtr) +void linkDirectCall(DirectCallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock, CodePtr codePtr) { - ASSERT(!callLinkInfo.stub()); - // DirectCall is only used from DFG / FTL. 
- CodeBlock* callerCodeBlock = callFrame->codeBlock(); - - VM& vm = callerCodeBlock->vm(); - - ASSERT(!callLinkInfo.isLinked()); - callLinkInfo.setCodeBlock(vm, callerCodeBlock, jsCast(calleeCodeBlock)); - if (shouldDumpDisassemblyFor(callerCodeBlock)) - dataLog("Linking call in ", FullCodeOrigin(callerCodeBlock, callLinkInfo.codeOrigin()), " to ", pointerDump(calleeCodeBlock), ", entrypoint at ", codePtr, "\n"); - - callLinkInfo.setDirectCallTarget(jsCast(calleeCodeBlock), CodeLocationLabel(codePtr)); - + callLinkInfo.setCallTarget(jsCast(calleeCodeBlock), CodeLocationLabel(codePtr)); if (calleeCodeBlock) - calleeCodeBlock->linkIncomingCall(callFrame, &callLinkInfo); + calleeCodeBlock->linkIncomingCall(callLinkInfo.owner(), &callLinkInfo); } -static void linkVirtualFor(VM& vm, CallFrame* callFrame, CallLinkInfo& callLinkInfo) -{ - CallFrame* callerFrame = callFrame->callerFrame(); - - JSCell* owner = callerFrame->codeOwnerCell(); - CodeBlock* callerCodeBlock = jsDynamicCast(owner); // WebAssembly -> JS stubs don't have a valid CodeBlock. - - dataLogLnIf(shouldDumpDisassemblyFor(callerCodeBlock), - "Linking virtual call at ", FullCodeOrigin(callerCodeBlock, callerCodeBlock ? callerFrame->codeOrigin() : CodeOrigin { })); - - MacroAssemblerCodeRef virtualThunk = vm.getCTIVirtualCall(callLinkInfo.callMode()); - revertCall(vm, callLinkInfo, virtualThunk); - callLinkInfo.setClearedByVirtual(); -} - -namespace { -struct CallToCodePtr { - CCallHelpers::Call call; - CodePtr codePtr; -}; -} // annonymous namespace - -void linkPolymorphicCall(JSGlobalObject* globalObject, CallFrame* callFrame, CallLinkInfo& callLinkInfo, CallVariant newVariant) +void linkPolymorphicCall(VM& vm, JSCell* owner, CallFrame* callFrame, CallLinkInfo& callLinkInfo, CallVariant newVariant) { RELEASE_ASSERT(callLinkInfo.allowStubs()); - CallFrame* callerFrame = callFrame->callerFrame(); - VM& vm = globalObject->vm(); - // During execution of linkPolymorphicCall, we strongly assume that we never do GC. // GC jettisons CodeBlocks, changes CallLinkInfo etc. and breaks assumption done before and after this call. DeferGCForAWhile deferGCForAWhile(vm); if (!newVariant) { - linkVirtualFor(vm, callFrame, callLinkInfo); + callLinkInfo.setVirtualCall(vm); return; } - JSCell* owner = callerFrame->codeOwnerCell(); CodeBlock* callerCodeBlock = jsDynamicCast(owner); // WebAssembly -> JS stubs don't have a valid CodeBlock. ASSERT(owner); #if ENABLE(WEBASSEMBLY) @@ -1758,11 +1799,15 @@ void linkPolymorphicCall(JSGlobalObject* globalObject, CallFrame* callFrame, Cal #else bool isWebAssembly = false; #endif + bool isDataIC = callLinkInfo.isDataIC(); + bool isTailCall = callLinkInfo.isTailCall(); + bool isClosureCall = false; CallVariantList list; - if (PolymorphicCallStubRoutine* stub = callLinkInfo.stub()) + if (PolymorphicCallStubRoutine* stub = callLinkInfo.stub()) { list = stub->variants(); - else if (JSObject* oldCallee = callLinkInfo.callee()) + isClosureCall = stub->isClosureCall(); + } else if (JSObject* oldCallee = callLinkInfo.callee()) list = CallVariantList { CallVariant(oldCallee) }; list = variantListWithVariant(list, newVariant); @@ -1770,20 +1815,35 @@ void linkPolymorphicCall(JSGlobalObject* globalObject, CallFrame* callFrame, Cal // If there are any closure calls then it makes sense to treat all of them as closure calls. // This makes switching on callee cheaper. It also produces profiling that's easier on the DFG; // the DFG doesn't really want to deal with a combination of closure and non-closure callees. 
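// (The reworked scan below runs only when the previous stub was not already a
// closure-call stub; despecifiedVariantList() collapses per-function variants to their
// executables, so the generated stub switches on the executable rather than on the
// callee object itself.)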
- bool isClosureCall = false; - for (CallVariant variant : list) { - if (variant.isClosureCall()) { - list = despecifiedVariantList(list); - isClosureCall = true; - break; + if (!isClosureCall) { + for (CallVariant variant : list) { + if (variant.isClosureCall()) { + list = despecifiedVariantList(list); + isClosureCall = true; + break; + } } } - + if (isClosureCall) callLinkInfo.setHasSeenClosure(); - - Vector callCases; - Vector caseValues; + + // If we are over the limit, just use a normal virtual call. + unsigned maxPolymorphicCallVariantListSize; + if (isWebAssembly) + maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSizeForWebAssemblyToJS(); + else if (callerCodeBlock->jitType() == JITCode::topTierJIT()) + maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSizeForTopTier(); + else + maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSize(); + + // We use list.size() instead of callSlots.size() because we respect CallVariant size for now. + if (list.size() > maxPolymorphicCallVariantListSize) { + callLinkInfo.setVirtualCall(vm); + return; + } + + Vector callSlots; // Figure out what our cases are. for (CallVariant variant : list) { @@ -1794,294 +1854,193 @@ void linkPolymorphicCall(JSGlobalObject* globalObject, CallFrame* callFrame, Cal // If we cannot handle a callee, because we don't have a CodeBlock, // assume that it's better for this whole thing to be a virtual call. if (!codeBlock) { - linkVirtualFor(vm, callFrame, callLinkInfo); + callLinkInfo.setVirtualCall(vm); return; } } - int64_t newCaseValue = 0; + JSCell* caseValue = nullptr; if (isClosureCall) { - newCaseValue = bitwise_cast(variant.executable()); + caseValue = variant.executable(); // FIXME: We could add a fast path for InternalFunction with closure call. 
// https://bugs.webkit.org/show_bug.cgi?id=179311 - if (!newCaseValue) + if (!caseValue) continue; } else { if (auto* function = variant.function()) - newCaseValue = bitwise_cast(function); + caseValue = function; else - newCaseValue = bitwise_cast(variant.internalFunction()); + caseValue = variant.internalFunction(); } - if (ASSERT_ENABLED) { - if (caseValues.contains(newCaseValue)) { - dataLog("ERROR: Attempt to add duplicate case value.\n"); - dataLog("Existing case values: "); - CommaPrinter comma; - for (auto& value : caseValues) - dataLog(comma, value); - dataLog("\n"); - dataLog("Attempting to add: ", newCaseValue, "\n"); - dataLog("Variant list: ", listDump(callCases), "\n"); - RELEASE_ASSERT_NOT_REACHED(); + CallSlot slot; + + CodePtr codePtr; + if (variant.executable()) { + ASSERT(variant.executable()->hasJITCodeForCall()); + + codePtr = jsToWasmICCodePtr(callLinkInfo.specializationKind(), variant.function()); + if (!codePtr) { + ArityCheckMode arityCheck = ArityCheckNotRequired; + if (codeBlock) { + ASSERT(!variant.executable()->isHostFunction()); + if ((callFrame->argumentCountIncludingThis() < static_cast(codeBlock->numParameters()) || callLinkInfo.isVarargs())) + arityCheck = MustCheckArity; + + } + codePtr = variant.executable()->generatedJITCodeForCall()->addressForCall(arityCheck); + slot.m_arityCheckMode = arityCheck; } + } else { + ASSERT(variant.internalFunction()); + codePtr = vm.getCTIInternalFunctionTrampolineFor(CodeForCall); } - callCases.append(PolymorphicCallCase(variant, codeBlock)); - caseValues.append(newCaseValue); + slot.m_index = callSlots.size(); + slot.m_target = codePtr; + slot.m_codeBlock = codeBlock; + slot.m_calleeOrExecutable = caseValue; + + callSlots.append(WTFMove(slot)); } - ASSERT(callCases.size() == caseValues.size()); - // If we are over the limit, just use a normal virtual call. - unsigned maxPolymorphicCallVariantListSize; - if (isWebAssembly) - maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSizeForWebAssemblyToJS(); - else if (callerCodeBlock->jitType() == JITCode::topTierJIT()) - maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSizeForTopTier(); - else - maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSize(); + bool notUsingCounting = isWebAssembly || callerCodeBlock->jitType() == JITCode::topTierJIT(); + if (callSlots.isEmpty()) + notUsingCounting = true; + + CallFrame* callerFrame = nullptr; + if (!isTailCall) + callerFrame = callFrame->callerFrame(); + + if (isDataIC) { + CommonJITThunkID jitThunk = CommonJITThunkID::PolymorphicThunkForRegularCall; + if (isClosureCall) + jitThunk = isTailCall ? CommonJITThunkID::PolymorphicThunkForTailCallForClosure : CommonJITThunkID::PolymorphicThunkForRegularCallForClosure; + else + jitThunk = isTailCall ? CommonJITThunkID::PolymorphicThunkForTailCall : CommonJITThunkID::PolymorphicThunkForRegularCall; + + auto stubRoutine = PolymorphicCallStubRoutine::create(vm.getCTIStub(jitThunk).retagged(), vm, owner, callerFrame, callLinkInfo, callSlots, nullptr, notUsingCounting, isClosureCall); + + // If there had been a previous stub routine, that one will die as soon as the GC runs and sees + // that it's no longer on stack. + callLinkInfo.setStub(WTFMove(stubRoutine)); + + // The call link info no longer has a call cache apart from the jump to the polymorphic call + // stub. + if (callLinkInfo.isOnList()) + callLinkInfo.remove(); - // We use list.size() instead of callCases.size() because we respect CallVariant size for now. 
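The slot-construction loop in this hunk resolves each variant to a concrete entrypoint and records whether the arity-check entry is required, as the size check just below shows in its original form. A standalone sketch of the arity decision, with invented names (the real code consults callFrame->argumentCountIncludingThis(), the callee CodeBlock's numParameters(), and callLinkInfo.isVarargs()):

#include <cstddef>

enum class ArityCheckMode { NotRequired, MustCheck };

// A callee compiled for N parameters needs the arity-fixup entrypoint when the
// site may pass fewer arguments than N, or is a varargs call whose argument
// count is unknown at the time the stub is linked.
ArityCheckMode arityModeFor(size_t argumentCountIncludingThis, size_t calleeNumParameters, bool isVarargs)
{
    if (isVarargs || argumentCountIncludingThis < calleeNumParameters)
        return ArityCheckMode::MustCheck;
    return ArityCheckMode::NotRequired;
}

Internal functions skip all of this and share a common trampoline, which is why a slot's target can also come from vm.getCTIInternalFunctionTrampolineFor(CodeForCall).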
- if (list.size() > maxPolymorphicCallVariantListSize) { - linkVirtualFor(vm, callFrame, callLinkInfo); return; } - Vector<CallToCodePtr> calls(callCases.size()); + ASSERT(callLinkInfo.type() == CallLinkInfo::Type::Optimizing); + + CCallHelpers jit(callerCodeBlock); + GPRReg calleeGPR = BaselineJITRegisters::Call::calleeGPR; + UniqueArray<uint32_t> fastCounts; - if (!isWebAssembly && callerCodeBlock->jitType() != JITCode::topTierJIT()) { - fastCounts = makeUniqueArray<uint32_t>(callCases.size()); - memset(fastCounts.get(), 0, callCases.size() * sizeof(uint32_t)); + if (!notUsingCounting) { + fastCounts = makeUniqueArray<uint32_t>(callSlots.size()); + memset(fastCounts.get(), 0, callSlots.size() * sizeof(uint32_t)); } - - GPRReg calleeGPR = callLinkInfo.calleeGPR(); - bool isDataIC = callLinkInfo.isDataIC(); - CCallHelpers stubJit(callerCodeBlock); - - std::optional<CallFrameShuffler> frameShuffler; - switch (callLinkInfo.type()) { - case CallLinkInfo::Type::Baseline: { - auto* instruction = callerCodeBlock->instructionAt(callLinkInfo.codeOrigin().bytecodeIndex()); - if (instruction->opcodeID() == op_tail_call) { - auto bytecode = instruction->as<OpTailCall>(); - CallFrameShuffleData shuffleData = CallFrameShuffleData::createForBaselineOrLLIntTailCall(bytecode, callerCodeBlock->numParameters()); - frameShuffler.emplace(stubJit, shuffleData); - } - break; - } - case CallLinkInfo::Type::Optimizing: { - auto& optimizingCallLinkInfo = static_cast<OptimizingCallLinkInfo&>(callLinkInfo); - if (optimizingCallLinkInfo.frameShuffleData()) { - ASSERT(callLinkInfo.isTailCall()); - frameShuffler.emplace(stubJit, *optimizingCallLinkInfo.frameShuffleData()); + Vector<int64_t> caseValues; + caseValues.reserveInitialCapacity(callSlots.size()); + for (auto& slot : callSlots) { + int64_t caseValue = bitwise_cast<int64_t>(slot.m_calleeOrExecutable); +#if ASSERT_ENABLED + if (caseValues.contains(caseValue)) { + dataLog("ERROR: Attempt to add duplicate case value.\n"); + dataLog("Existing case values: "); + CommaPrinter comma; + for (auto& value : caseValues) + dataLog(comma, value); + dataLog("\n"); + dataLog("Attempting to add: ", caseValue, "\n"); + dataLog("Variant list: ", listDump(callSlots.map([&](auto& slot) { + return PolymorphicCallCase(CallVariant(slot.m_calleeOrExecutable), slot.m_codeBlock); + })), "\n"); + RELEASE_ASSERT_NOT_REACHED(); } - break; - } - } - - if (frameShuffler) { -#if USE(JSVALUE32_64) - // We would have already checked that the callee is a cell, and we can - // use the additional register this buys us. - frameShuffler->assumeCalleeIsCell(); #endif - frameShuffler->lockGPR(calleeGPR); + caseValues.append(caseValue); } - GPRReg comparisonValueGPR; - if (isClosureCall) { - if (frameShuffler) - comparisonValueGPR = frameShuffler->acquireGPR(); - else - comparisonValueGPR = AssemblyHelpers::selectScratchGPR(calleeGPR); - } else - comparisonValueGPR = calleeGPR; - - GPRReg fastCountsBaseGPR; - if (frameShuffler) - fastCountsBaseGPR = frameShuffler->acquireGPR(); - else { - fastCountsBaseGPR = - AssemblyHelpers::selectScratchGPR(calleeGPR, comparisonValueGPR, GPRInfo::regT3); - } - stubJit.move(CCallHelpers::TrustedImmPtr(fastCounts.get()), fastCountsBaseGPR); + GPRReg comparisonValueGPR = calleeGPR; + if (isClosureCall) + comparisonValueGPR = AssemblyHelpers::selectScratchGPR(calleeGPR); - if (!frameShuffler && callLinkInfo.isTailCall()) { - // We strongly assume that calleeGPR is not a callee save register in the slow path.
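Two pieces of bookkeeping above deserve a gloss before the removed tail-call code continues: caseValues must be duplicate-free or the binary switch becomes ambiguous (hence the ASSERT_ENABLED dump-and-crash), and fastCounts allocates one zeroed uint32_t per case that the generated stub increments on every hit. A toy model of both, in plain C++ rather than JSC's WTF types:

#include <cassert>
#include <cstdint>
#include <memory>
#include <vector>

struct PolymorphicProfile {
    std::vector<int64_t> caseValues;        // keys for the binary switch
    std::unique_ptr<uint32_t[]> fastCounts; // one hit counter per case, zero-initialized

    explicit PolymorphicProfile(std::vector<int64_t> keys)
        : caseValues(std::move(keys))
        , fastCounts(std::make_unique<uint32_t[]>(caseValues.size()))
    {
        // Duplicate keys would make dispatch ambiguous; JSC release-asserts.
        for (size_t i = 0; i < caseValues.size(); ++i)
            for (size_t j = i + 1; j < caseValues.size(); ++j)
                assert(caseValues[i] != caseValues[j]);
    }

    void recordHit(size_t caseIndex) { ++fastCounts[caseIndex]; }
};

Counting is skipped (notUsingCounting) for WebAssembly callers and top-tier code, presumably because the counts exist to inform a later tier-up decision that those callers will never make.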
- ASSERT(!callerCodeBlock->jitCode()->calleeSaveRegisters()->find(calleeGPR)); - stubJit.emitRestoreCalleeSavesFor(callerCodeBlock->jitCode()->calleeSaveRegisters()); - } + GPRReg fastCountsBaseGPR = AssemblyHelpers::selectScratchGPR(calleeGPR, comparisonValueGPR); + jit.move(CCallHelpers::TrustedImmPtr(fastCounts.get()), fastCountsBaseGPR); CCallHelpers::JumpList slowPath; if (isClosureCall) { // Verify that we have a function and stash the executable in scratchGPR. #if USE(JSVALUE64) - if (callLinkInfo.isTailCall()) - slowPath.append(stubJit.branchIfNotCell(calleeGPR, DoNotHaveTagRegisters)); + if (isTailCall) + slowPath.append(jit.branchIfNotCell(calleeGPR, DoNotHaveTagRegisters)); else - slowPath.append(stubJit.branchIfNotCell(calleeGPR)); + slowPath.append(jit.branchIfNotCell(calleeGPR)); #else // We would have already checked that the callee is a cell. #endif // FIXME: We could add a fast path for InternalFunction with closure call. - slowPath.append(stubJit.branchIfNotFunction(calleeGPR)); + slowPath.append(jit.branchIfNotFunction(calleeGPR)); - stubJit.loadPtr(CCallHelpers::Address(calleeGPR, JSFunction::offsetOfExecutableOrRareData()), comparisonValueGPR); - auto hasExecutable = stubJit.branchTestPtr(CCallHelpers::Zero, comparisonValueGPR, CCallHelpers::TrustedImm32(JSFunction::rareDataTag)); - stubJit.loadPtr(CCallHelpers::Address(comparisonValueGPR, FunctionRareData::offsetOfExecutable() - JSFunction::rareDataTag), comparisonValueGPR); - hasExecutable.link(&stubJit); + jit.loadPtr(CCallHelpers::Address(calleeGPR, JSFunction::offsetOfExecutableOrRareData()), comparisonValueGPR); + auto hasExecutable = jit.branchTestPtr(CCallHelpers::Zero, comparisonValueGPR, CCallHelpers::TrustedImm32(JSFunction::rareDataTag)); + jit.loadPtr(CCallHelpers::Address(comparisonValueGPR, FunctionRareData::offsetOfExecutable() - JSFunction::rareDataTag), comparisonValueGPR); + hasExecutable.link(&jit); } BinarySwitch binarySwitch(comparisonValueGPR, caseValues, BinarySwitch::IntPtr); - CCallHelpers::JumpList done; - while (binarySwitch.advance(stubJit)) { + while (binarySwitch.advance(jit)) { size_t caseIndex = binarySwitch.caseIndex(); - - PolymorphicCallCase& callCase = callCases[caseIndex]; - CallVariant variant = callCase.variant(); - - CodePtr codePtr; - if (variant.executable()) { - ASSERT(variant.executable()->hasJITCodeForCall()); - - codePtr = jsToWasmICCodePtr(callLinkInfo.specializationKind(), variant.function()); - if (!codePtr) { - ArityCheckMode arityCheck = ArityCheckNotRequired; - if (auto* codeBlock = callCase.codeBlock()) { - ASSERT(!variant.executable()->isHostFunction()); - if ((callFrame->argumentCountIncludingThis() < static_cast(codeBlock->numParameters()) || callLinkInfo.isVarargs())) - arityCheck = MustCheckArity; - - } - codePtr = variant.executable()->generatedJITCodeForCall()->addressForCall(arityCheck); - } - } else { - ASSERT(variant.internalFunction()); - codePtr = vm.getCTIInternalFunctionTrampolineFor(CodeForCall); - } - + auto& slot = callSlots[caseIndex]; + CallVariant variant(slot.m_calleeOrExecutable); + CodeBlock* codeBlock = slot.m_codeBlock; + CodePtr codePtr = slot.m_target; if (fastCounts) { - stubJit.add32( + jit.add32( CCallHelpers::TrustedImm32(1), CCallHelpers::Address(fastCountsBaseGPR, caseIndex * sizeof(uint32_t))); } - - bool needsDoneJump = false; - if (frameShuffler) { - CallFrameShuffler(stubJit, frameShuffler->snapshot()).prepareForTailCall(); - if (callCase.codeBlock()) - stubJit.storePtr(CCallHelpers::TrustedImmPtr(callCase.codeBlock()), 
CCallHelpers::calleeFrameCodeBlockBeforeTailCall()); - calls[caseIndex].call = stubJit.nearTailCall(); - } else if (callLinkInfo.isTailCall()) { - stubJit.prepareForTailCallSlow(); - if (callCase.codeBlock()) - stubJit.storePtr(CCallHelpers::TrustedImmPtr(callCase.codeBlock()), CCallHelpers::calleeFrameCodeBlockBeforeTailCall()); - calls[caseIndex].call = stubJit.nearTailCall(); + if (isTailCall) { + if (codeBlock) + jit.storePtr(CCallHelpers::TrustedImmPtr(codeBlock), CCallHelpers::calleeFrameCodeBlockBeforeTailCall()); + jit.nearTailCallThunk(CodeLocationLabel { codePtr }); } else { - ASSERT(!callLinkInfo.isTailCall()); - if (isDataIC) { - if (callCase.codeBlock()) - stubJit.storePtr(CCallHelpers::TrustedImmPtr(callCase.codeBlock()), CCallHelpers::calleeFrameCodeBlockBeforeTailCall()); - calls[caseIndex].call = stubJit.nearTailCall(); - } else { - if (callCase.codeBlock()) - stubJit.storePtr(CCallHelpers::TrustedImmPtr(callCase.codeBlock()), CCallHelpers::calleeFrameCodeBlockBeforeCall()); - calls[caseIndex].call = stubJit.nearCall(); - needsDoneJump = true; - } - } - calls[caseIndex].codePtr = codePtr; - if (needsDoneJump) { - ASSERT(!isDataIC); - done.append(stubJit.jump()); + if (codeBlock) + jit.storePtr(CCallHelpers::TrustedImmPtr(codeBlock), CCallHelpers::calleeFrameCodeBlockBeforeCall()); + jit.nearCallThunk(CodeLocationLabel { codePtr }); + jit.jumpThunk(callLinkInfo.doneLocation()); } } - - slowPath.link(&stubJit); - binarySwitch.fallThrough().link(&stubJit); - if (frameShuffler) { - frameShuffler->releaseGPR(calleeGPR); - frameShuffler->releaseGPR(comparisonValueGPR); - frameShuffler->releaseGPR(fastCountsBaseGPR); -#if USE(JSVALUE32_64) - frameShuffler->setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT1, GPRInfo::regT0)); -#else - frameShuffler->setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT0)); -#endif - frameShuffler->prepareForSlowPath(); - } else { - stubJit.move(calleeGPR, GPRInfo::regT0); -#if USE(JSVALUE32_64) - stubJit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1); -#endif - } - stubJit.move(CCallHelpers::TrustedImmPtr(globalObject), GPRInfo::regT3); - stubJit.move(CCallHelpers::TrustedImmPtr(&callLinkInfo), GPRInfo::regT2); - - // 1. If it is not DataIC, linkRegister is not pointing the doneLocation. - // 2. If it is tail-call, linkRegister is not pointing the doneLocation for slow-call case. But since we are not executing prepareForTailCall, we still stack entries for the caller's frame. - // 3. If we're a data IC, then the return address is already correct - // Thus we need to put it for the slow-path call. - if (!isDataIC) { - stubJit.move(CCallHelpers::TrustedImmPtr(callLinkInfo.doneLocation().untaggedPtr()), GPRInfo::regT4); - stubJit.restoreReturnAddressBeforeReturn(GPRInfo::regT4); - } else { - // FIXME: We are not doing a real tail-call in this case. We leave stack entries in the caller, and we are not running prepareForTailCall, thus, - // we will return to the caller after the callee finishes. We should make it a real tail-call for this slow path case. - switch (callLinkInfo.type()) { - case CallLinkInfo::Type::Baseline: { -#if ASSERT_ENABLED - // It needs to be LLInt or Baseline since we are using returnFromBaselineGenerator. 
- if (!isWebAssembly) - ASSERT(!JITCode::isOptimizingJIT(callerCodeBlock->jitType())); -#endif - if (callLinkInfo.isTailCall()) { - stubJit.move(CCallHelpers::TrustedImmPtr(vm.getCTIStub(JIT::returnFromBaselineGenerator).code().untaggedPtr()), GPRInfo::regT4); - stubJit.restoreReturnAddressBeforeReturn(GPRInfo::regT4); - } - break; - } - case CallLinkInfo::Type::Optimizing: { - // While Baseline / LLInt shares BaselineCallLinkInfo, OptimizingCallLinkInfo is exclusively used for one JIT code. - // Thus, we can safely use doneLocation. - if (callLinkInfo.isTailCall()) { - stubJit.move(CCallHelpers::TrustedImmPtr(callLinkInfo.doneLocation().untaggedPtr()), GPRInfo::regT4); - stubJit.restoreReturnAddressBeforeReturn(GPRInfo::regT4); - } - break; - } - } + slowPath.link(&jit); + binarySwitch.fallThrough().link(&jit); + jit.move(CCallHelpers::TrustedImmPtr(&callLinkInfo), GPRInfo::regT2); + if (isTailCall) + jit.nearTailCallThunk(CodeLocationLabel { vm.getCTIStub(CommonJITThunkID::PolymorphicRepatchThunk).code() }); + else { + jit.nearCallThunk(CodeLocationLabel { vm.getCTIStub(CommonJITThunkID::PolymorphicRepatchThunk).code() }); + jit.jumpThunk(callLinkInfo.doneLocation()); } - AssemblyHelpers::Jump slow = stubJit.jump(); - - LinkBuffer patchBuffer(stubJit, owner, LinkBuffer::Profile::InlineCache, JITCompilationCanFail); + LinkBuffer patchBuffer(jit, owner, LinkBuffer::Profile::InlineCache, JITCompilationCanFail); if (patchBuffer.didFailToAllocate()) { - linkVirtualFor(vm, callFrame, callLinkInfo); + callLinkInfo.setVirtualCall(vm); return; } - RELEASE_ASSERT(callCases.size() == calls.size()); - for (CallToCodePtr callToCodePtr : calls) - patchBuffer.link(callToCodePtr.call, callToCodePtr.codePtr); - - if (!done.empty()) { - ASSERT(!isDataIC); - patchBuffer.link(done, callLinkInfo.doneLocation()); - } - patchBuffer.link(slow, CodeLocationLabel(vm.getCTIStub(linkPolymorphicCallThunkGenerator).code())); - - auto stubRoutine = adoptRef(*new PolymorphicCallStubRoutine( + auto stubRoutine = PolymorphicCallStubRoutine::create( FINALIZE_CODE_FOR( - callerCodeBlock, patchBuffer, JITStubRoutinePtrTag, + callerCodeBlock, patchBuffer, JITStubRoutinePtrTag, "PolymorphicCall"_s, "Polymorphic call stub for %s, return point %p, targets %s", isWebAssembly ? 
"WebAssembly" : toCString(*callerCodeBlock).data(), callLinkInfo.doneLocation().taggedPtr(), - toCString(listDump(callCases)).data()), - vm, owner, callFrame->callerFrame(), callLinkInfo, callCases, - WTFMove(fastCounts))); + toCString(listDump(callSlots.map([&](auto& slot) { return PolymorphicCallCase(CallVariant(slot.m_calleeOrExecutable), slot.m_codeBlock); }))).data()), + vm, owner, callerFrame, callLinkInfo, callSlots, WTFMove(fastCounts), notUsingCounting, isClosureCall); // The original slow path is unreachable on 64-bits, but still // reachable on 32-bits since a non-cell callee will always @@ -2101,20 +2060,7 @@ void linkPolymorphicCall(JSGlobalObject* globalObject, CallFrame* callFrame, Cal void resetGetBy(CodeBlock* codeBlock, StructureStubInfo& stubInfo, GetByKind kind) { repatchSlowPathCall(codeBlock, stubInfo, appropriateGetByOptimizeFunction(kind)); - switch (kind) { - case GetByKind::ById: - case GetByKind::ByIdWithThis: - case GetByKind::TryById: - case GetByKind::ByIdDirect: - case GetByKind::PrivateNameById: - InlineAccess::resetStubAsJumpInAccess(codeBlock, stubInfo); - break; - case GetByKind::ByVal: - case GetByKind::ByValWithThis: - case GetByKind::PrivateName: - InlineAccess::resetStubAsJumpInAccessNotUsingInlineAccess(codeBlock, stubInfo); - break; - } + InlineAccess::resetStubAsJumpInAccess(codeBlock, stubInfo); } void resetPutBy(CodeBlock* codeBlock, StructureStubInfo& stubInfo, PutByKind kind) @@ -2160,66 +2106,56 @@ void resetPutBy(CodeBlock* codeBlock, StructureStubInfo& stubInfo, PutByKind kin } repatchSlowPathCall(codeBlock, stubInfo, optimizedFunction); - switch (kind) { - case PutByKind::ByIdStrict: - case PutByKind::ByIdSloppy: - case PutByKind::ByIdDirectStrict: - case PutByKind::ByIdDirectSloppy: - case PutByKind::DefinePrivateNameById: - case PutByKind::SetPrivateNameById: - InlineAccess::resetStubAsJumpInAccess(codeBlock, stubInfo); - break; - case PutByKind::ByValStrict: - case PutByKind::ByValSloppy: - case PutByKind::ByValDirectStrict: - case PutByKind::ByValDirectSloppy: - case PutByKind::DefinePrivateNameByVal: - case PutByKind::SetPrivateNameByVal: - InlineAccess::resetStubAsJumpInAccessNotUsingInlineAccess(codeBlock, stubInfo); - break; - } + InlineAccess::resetStubAsJumpInAccess(codeBlock, stubInfo); } void resetDelBy(CodeBlock* codeBlock, StructureStubInfo& stubInfo, DelByKind kind) { - if (kind == DelByKind::ById) - repatchSlowPathCall(codeBlock, stubInfo, operationDeleteByIdOptimize); - else - repatchSlowPathCall(codeBlock, stubInfo, operationDeleteByValOptimize); - InlineAccess::resetStubAsJumpInAccessNotUsingInlineAccess(codeBlock, stubInfo); + switch (kind) { + case DelByKind::ByIdStrict: + repatchSlowPathCall(codeBlock, stubInfo, operationDeleteByIdStrictOptimize); + break; + case DelByKind::ByIdSloppy: + repatchSlowPathCall(codeBlock, stubInfo, operationDeleteByIdSloppyOptimize); + break; + case DelByKind::ByValStrict: + repatchSlowPathCall(codeBlock, stubInfo, operationDeleteByValStrictOptimize); + break; + case DelByKind::ByValSloppy: + repatchSlowPathCall(codeBlock, stubInfo, operationDeleteByValSloppyOptimize); + break; + } + InlineAccess::resetStubAsJumpInAccess(codeBlock, stubInfo); } void resetInBy(CodeBlock* codeBlock, StructureStubInfo& stubInfo, InByKind kind) { repatchSlowPathCall(codeBlock, stubInfo, appropriateInByOptimizeFunction(kind)); - if (kind == InByKind::ById) - InlineAccess::resetStubAsJumpInAccess(codeBlock, stubInfo); - else - InlineAccess::resetStubAsJumpInAccessNotUsingInlineAccess(codeBlock, stubInfo); + 
InlineAccess::resetStubAsJumpInAccess(codeBlock, stubInfo); } void resetHasPrivateBrand(CodeBlock* codeBlock, StructureStubInfo& stubInfo) { repatchSlowPathCall(codeBlock, stubInfo, operationHasPrivateBrandOptimize); - InlineAccess::resetStubAsJumpInAccessNotUsingInlineAccess(codeBlock, stubInfo); + InlineAccess::resetStubAsJumpInAccess(codeBlock, stubInfo); } void resetInstanceOf(CodeBlock* codeBlock, StructureStubInfo& stubInfo) { repatchSlowPathCall(codeBlock, stubInfo, operationInstanceOfOptimize); - InlineAccess::resetStubAsJumpInAccessNotUsingInlineAccess(codeBlock, stubInfo); + InlineAccess::resetStubAsJumpInAccess(codeBlock, stubInfo); } void resetCheckPrivateBrand(CodeBlock* codeBlock, StructureStubInfo& stubInfo) { repatchSlowPathCall(codeBlock, stubInfo, operationCheckPrivateBrandOptimize); - InlineAccess::resetStubAsJumpInAccessNotUsingInlineAccess(codeBlock, stubInfo); + InlineAccess::resetStubAsJumpInAccess(codeBlock, stubInfo); } void resetSetPrivateBrand(CodeBlock* codeBlock, StructureStubInfo& stubInfo) { repatchSlowPathCall(codeBlock, stubInfo, operationSetPrivateBrandOptimize); - InlineAccess::resetStubAsJumpInAccessNotUsingInlineAccess(codeBlock, stubInfo); + InlineAccess::resetStubAsJumpInAccess(codeBlock, stubInfo); } #endif diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/Repatch.h b/vendor/webkit/Source/JavaScriptCore/bytecode/Repatch.h index 77135f9b..87e6dd5f 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/Repatch.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/Repatch.h @@ -31,6 +31,7 @@ namespace JSC { class CallLinkInfo; +class DirectCallLinkInfo; class OptimizingCallLinkInfo; class StructureStubInfo; @@ -61,8 +62,10 @@ enum class PutByKind { }; enum class DelByKind { - ById, - ByVal + ByIdStrict, + ByIdSloppy, + ByValStrict, + ByValSloppy, }; enum class InByKind { @@ -76,15 +79,15 @@ void repatchGetBy(JSGlobalObject*, CodeBlock*, JSValue, CacheableIdentifier, con void repatchArrayPutByVal(JSGlobalObject*, CodeBlock*, JSValue base, JSValue index, StructureStubInfo&, PutByKind); void repatchPutBy(JSGlobalObject*, CodeBlock*, JSValue, Structure*, CacheableIdentifier, const PutPropertySlot&, StructureStubInfo&, PutByKind); void repatchDeleteBy(JSGlobalObject*, CodeBlock*, DeletePropertySlot&, JSValue, Structure*, CacheableIdentifier, StructureStubInfo&, DelByKind, ECMAMode); +void repatchArrayInByVal(JSGlobalObject*, CodeBlock*, JSValue base, JSValue index, StructureStubInfo&, InByKind); void repatchInBy(JSGlobalObject*, CodeBlock*, JSObject*, CacheableIdentifier, bool wasFound, const PropertySlot&, StructureStubInfo&, InByKind); void repatchHasPrivateBrand(JSGlobalObject*, CodeBlock*, JSObject*, CacheableIdentifier, bool wasFound, StructureStubInfo&); void repatchCheckPrivateBrand(JSGlobalObject*, CodeBlock*, JSObject*, CacheableIdentifier, StructureStubInfo&); void repatchSetPrivateBrand(JSGlobalObject*, CodeBlock*, JSObject*, Structure*, CacheableIdentifier, StructureStubInfo&); void repatchInstanceOf(JSGlobalObject*, CodeBlock*, JSValue, JSValue prototype, StructureStubInfo&, bool wasFound); -void linkMonomorphicCall(VM&, CallFrame*, CallLinkInfo&, CodeBlock*, JSObject* callee, CodePtr); -void linkDirectCall(CallFrame*, OptimizingCallLinkInfo&, CodeBlock*, CodePtr); -void unlinkCall(VM&, CallLinkInfo&); -void linkPolymorphicCall(JSGlobalObject*, CallFrame*, CallLinkInfo&, CallVariant); +void linkMonomorphicCall(VM&, JSCell*, CallLinkInfo&, CodeBlock*, JSObject* callee, CodePtr); +void linkDirectCall(DirectCallLinkInfo&, CodeBlock*, 
CodePtr); +void linkPolymorphicCall(VM&, JSCell*, CallFrame*, CallLinkInfo&, CallVariant); void resetGetBy(CodeBlock*, StructureStubInfo&, GetByKind); void resetPutBy(CodeBlock*, StructureStubInfo&, PutByKind); void resetDelBy(CodeBlock*, StructureStubInfo&, DelByKind); @@ -96,6 +99,7 @@ void resetSetPrivateBrand(CodeBlock*, StructureStubInfo&); void repatchGetBySlowPathCall(CodeBlock*, StructureStubInfo&, GetByKind); void repatchPutBySlowPathCall(CodeBlock*, StructureStubInfo&, PutByKind); +void repatchInBySlowPathCall(CodeBlock*, StructureStubInfo&, InByKind); void ftlThunkAwareRepatchCall(CodeBlock*, CodeLocationCall, CodePtr newCalleeFunction); CodePtr jsToWasmICCodePtr(CodeSpecializationKind, JSObject* callee); diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/RepatchInlines.h b/vendor/webkit/Source/JavaScriptCore/bytecode/RepatchInlines.h index d96c29ac..0d7e050c 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/RepatchInlines.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/RepatchInlines.h @@ -25,6 +25,7 @@ #pragma once +#include "ErrorInstance.h" #include "FrameTracers.h" #include "LLIntEntrypoint.h" #include "Repatch.h" @@ -33,9 +34,51 @@ namespace JSC { -inline UGPRPair handleHostCall(JSGlobalObject* globalObject, CallFrame* calleeFrame, JSValue callee, CallLinkInfo* callLinkInfo) +inline void* throwNotAFunctionErrorFromCallIC(JSGlobalObject* globalObject, JSCell* owner, JSValue callee, CallLinkInfo* callLinkInfo) +{ + VM& vm = getVM(globalObject); + auto scope = DECLARE_THROW_SCOPE(vm); + auto errorMessage = constructErrorMessage(globalObject, callee, "is not a function"_s); + RETURN_IF_EXCEPTION(scope, nullptr); + if (UNLIKELY(!errorMessage)) { + throwOutOfMemoryError(globalObject, scope); + return nullptr; + } + + // Call IC will throw these errors after throwing away the caller's frame when it is a tail-call. + // But we would like to have error information for them from the thrown frame. + // This frame information can be reconstructed easily since we have CodeOrigin and owner CodeBlock for CallLinkInfo. + auto [codeBlock, bytecodeIndex] = callLinkInfo->retrieveCaller(owner); + if (codeBlock) + errorMessage = appendSourceToErrorMessage(codeBlock, bytecodeIndex, errorMessage, runtimeTypeForValue(callee), notAFunctionSourceAppender); + auto* error = ErrorInstance::create(vm, globalObject->errorStructure(ErrorType::TypeError), errorMessage, JSValue(), ErrorType::TypeError, owner, callLinkInfo); + throwException(globalObject, scope, error); + return nullptr; +} + +inline void* throwNotAConstructorErrorFromCallIC(JSGlobalObject* globalObject, JSCell* owner, JSValue callee, CallLinkInfo* callLinkInfo) +{ + VM& vm = getVM(globalObject); + auto scope = DECLARE_THROW_SCOPE(vm); + auto errorMessage = constructErrorMessage(globalObject, callee, "is not a constructor"_s); + RETURN_IF_EXCEPTION(scope, nullptr); + if (UNLIKELY(!errorMessage)) { + throwOutOfMemoryError(globalObject, scope); + return nullptr; + } + // Call IC will throw these errors after throwing away the caller's frame when it is a tail-call. + // But we would like to have error information for them from the thrown frame. + // This frame information can be reconstructed easily since we have CodeOrigin and owner CodeBlock for CallLinkInfo. 
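Both throw helpers implement the same pattern, completed by the retrieveCaller call just below: because a tail call has already discarded the caller's frame by the time the IC throws, the caller is reconstructed from data the CallLinkInfo already carries (owner CodeBlock plus CodeOrigin) rather than by walking the stack, and the recovered location is used to append source text to the TypeError message. A condensed standalone sketch of the message-building half, with string stand-ins for JSC's types and an invented message format:

#include <optional>
#include <string>

struct CallerInfo { std::string sourceExpression; };

// caller is nullopt when no CodeBlock could be recovered
// (e.g. WebAssembly -> JS stubs, which have no valid CodeBlock).
std::string buildCallICErrorMessage(const std::string& calleeDescription,
    const std::string& reason, const std::optional<CallerInfo>& caller)
{
    std::string message = calleeDescription + " " + reason; // "... is not a function"
    if (caller)
        message += " (near '" + caller->sourceExpression + "')";
    return message;
}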
+ auto [codeBlock, bytecodeIndex] = callLinkInfo->retrieveCaller(owner); + if (codeBlock) + errorMessage = appendSourceToErrorMessage(codeBlock, bytecodeIndex, errorMessage, runtimeTypeForValue(callee), defaultSourceAppender); + auto* error = ErrorInstance::create(vm, globalObject->errorStructure(ErrorType::TypeError), errorMessage, JSValue(), ErrorType::TypeError, owner, callLinkInfo); + throwException(globalObject, scope, error); + return nullptr; +} + +inline void* handleHostCall(VM& vm, JSCell* owner, CallFrame* calleeFrame, JSValue callee, CallLinkInfo* callLinkInfo) { - VM& vm = globalObject->vm(); auto scope = DECLARE_THROW_SCOPE(vm); calleeFrame->setCodeBlock(nullptr); @@ -49,22 +92,15 @@ inline UGPRPair handleHostCall(JSGlobalObject* globalObject, CallFrame* calleeFr calleeFrame->setCallee(asObject(callee)); vm.encodedHostCallReturnValue = callData.native.function(asObject(callee)->globalObject(), calleeFrame); DisallowGC disallowGC; - if (UNLIKELY(scope.exception())) { - return encodeResult( - vm.getCTIThrowExceptionFromCallSlowPath().code().taggedPtr(), - reinterpret_cast(KeepTheFrame)); - } - - return encodeResult( - LLInt::getHostCallReturnValueEntrypoint().code().taggedPtr(), - reinterpret_cast(callLinkInfo->callMode() == CallMode::Tail ? ReuseTheFrame : KeepTheFrame)); + if (UNLIKELY(scope.exception())) + return nullptr; + return LLInt::getHostCallReturnValueEntrypoint().code().taggedPtr(); } + auto* globalObject = callLinkInfo->globalObjectForSlowPath(owner); + calleeFrame->setCallee(globalObject->partiallyInitializedFrameCallee()); ASSERT(callData.type == CallData::Type::None); - throwException(globalObject, scope, createNotAFunctionError(globalObject, callee)); - return encodeResult( - vm.getCTIThrowExceptionFromCallSlowPath().code().taggedPtr(), - reinterpret_cast(KeepTheFrame)); + RELEASE_AND_RETURN(scope, throwNotAFunctionErrorFromCallIC(globalObject, owner, callee, callLinkInfo)); } ASSERT(callLinkInfo->specializationKind() == CodeForConstruct); @@ -77,32 +113,22 @@ inline UGPRPair handleHostCall(JSGlobalObject* globalObject, CallFrame* calleeFr calleeFrame->setCallee(asObject(callee)); vm.encodedHostCallReturnValue = constructData.native.function(asObject(callee)->globalObject(), calleeFrame); DisallowGC disallowGC; - if (UNLIKELY(scope.exception())) { - return encodeResult( - vm.getCTIThrowExceptionFromCallSlowPath().code().taggedPtr(), - reinterpret_cast(KeepTheFrame)); - } - - return encodeResult(LLInt::getHostCallReturnValueEntrypoint().code().taggedPtr(), reinterpret_cast(KeepTheFrame)); + if (UNLIKELY(scope.exception())) + return nullptr; + return LLInt::getHostCallReturnValueEntrypoint().code().taggedPtr(); } + auto* globalObject = callLinkInfo->globalObjectForSlowPath(owner); + calleeFrame->setCallee(globalObject->partiallyInitializedFrameCallee()); ASSERT(constructData.type == CallData::Type::None); - throwException(globalObject, scope, createNotAConstructorError(globalObject, callee)); - return encodeResult( - vm.getCTIThrowExceptionFromCallSlowPath().code().taggedPtr(), - reinterpret_cast(KeepTheFrame)); + RELEASE_AND_RETURN(scope, throwNotAConstructorErrorFromCallIC(globalObject, owner, callee, callLinkInfo)); } -ALWAYS_INLINE UGPRPair linkFor(CallFrame* calleeFrame, JSGlobalObject* globalObject, CallLinkInfo* callLinkInfo) +ALWAYS_INLINE void* linkFor(VM& vm, JSCell* owner, CallFrame* calleeFrame, CallLinkInfo* callLinkInfo) { - CallFrame* callFrame = calleeFrame->callerFrame(); - VM& vm = globalObject->vm(); auto throwScope = DECLARE_THROW_SCOPE(vm); 
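Note the simplified contract in handleHostCall above: instead of the old encodeResult pairing of a throw target with a KeepTheFrame/ReuseTheFrame flag, the helper now returns a bare code pointer and signals a pending exception with nullptr. A schematic of what call sites assume under the new convention (names invented, not JSC's API):

#include <cstdio>

using CodePointer = const void*;

// nullptr uniformly means "exception pending; unwind" -- callers no longer
// decode a packed (target, frame-reuse) pair out of a UGPRPair.
void dispatch(CodePointer target)
{
    if (!target) {
        std::printf("unwind: exception pending\n");
        return;
    }
    std::printf("jump to %p\n", target);
}

Where the frame-reuse flag went is outside this hunk; presumably the calling thunks now know the call mode themselves rather than recomputing it on every slow-path return.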
CodeSpecializationKind kind = callLinkInfo->specializationKind(); - NativeCallFrameTracer tracer(vm, callFrame); - - RELEASE_ASSERT(!callLinkInfo->isDirect()); JSValue calleeAsValue = calleeFrame->guaranteedJSValueCallee(); JSCell* calleeAsFunctionCell = getJSFunction(calleeAsValue); @@ -111,15 +137,32 @@ ALWAYS_INLINE UGPRPair linkFor(CallFrame* calleeFrame, JSGlobalObject* globalObj CodePtr codePtr = vm.getCTIInternalFunctionTrampolineFor(kind); RELEASE_ASSERT(!!codePtr); - if (!callLinkInfo->seenOnce()) - callLinkInfo->setSeen(); - else - linkMonomorphicCall(vm, calleeFrame, *callLinkInfo, nullptr, internalFunction, codePtr); + switch (callLinkInfo->mode()) { + case CallLinkInfo::Mode::Init: { + if (!callLinkInfo->seenOnce()) + callLinkInfo->setSeen(); + else + linkMonomorphicCall(vm, owner, *callLinkInfo, nullptr, internalFunction, codePtr); + break; + } + case CallLinkInfo::Mode::Monomorphic: + case CallLinkInfo::Mode::Polymorphic: { +#if ENABLE(JIT) + if (kind == CodeForCall && callLinkInfo->allowStubs()) { + linkPolymorphicCall(vm, owner, calleeFrame, *callLinkInfo, CallVariant(internalFunction)); + break; + } +#endif + callLinkInfo->setVirtualCall(vm); + break; + } + case CallLinkInfo::Mode::Virtual: + break; + } - void* linkedTarget = codePtr.taggedPtr(); - return encodeResult(linkedTarget, reinterpret_cast(callLinkInfo->callMode() == CallMode::Tail ? ReuseTheFrame : KeepTheFrame)); + return codePtr.taggedPtr(); } - RELEASE_AND_RETURN(throwScope, handleHostCall(globalObject, calleeFrame, calleeAsValue, callLinkInfo)); + RELEASE_AND_RETURN(throwScope, handleHostCall(vm, owner, calleeFrame, calleeAsValue, callLinkInfo)); } JSFunction* callee = jsCast(calleeAsFunctionCell); @@ -138,19 +181,15 @@ ALWAYS_INLINE UGPRPair linkFor(CallFrame* calleeFrame, JSGlobalObject* globalObj } else { FunctionExecutable* functionExecutable = static_cast(executable); - auto handleThrowException = [&] () { - void* throwTarget = vm.getCTIThrowExceptionFromCallSlowPath().code().taggedPtr(); - return encodeResult(throwTarget, reinterpret_cast(KeepTheFrame)); - }; - if (!isCall(kind) && functionExecutable->constructAbility() == ConstructAbility::CannotConstruct) { - throwException(globalObject, throwScope, createNotAConstructorError(globalObject, callee)); - return handleThrowException(); + auto* globalObject = callLinkInfo->globalObjectForSlowPath(owner); + calleeFrame->setCallee(globalObject->partiallyInitializedFrameCallee()); + RELEASE_AND_RETURN(throwScope, throwNotAConstructorErrorFromCallIC(globalObject, owner, callee, callLinkInfo)); } CodeBlock** codeBlockSlot = calleeFrame->addressOfCodeBlock(); functionExecutable->prepareForExecution(vm, callee, scope, kind, *codeBlockSlot); - RETURN_IF_EXCEPTION(throwScope, handleThrowException()); + RETURN_IF_EXCEPTION(throwScope, nullptr); codeBlock = *codeBlockSlot; ASSERT(codeBlock); @@ -163,22 +202,37 @@ ALWAYS_INLINE UGPRPair linkFor(CallFrame* calleeFrame, JSGlobalObject* globalObj codePtr = functionExecutable->entrypointFor(kind, arity); } - if (!callLinkInfo->seenOnce()) - callLinkInfo->setSeen(); - else - linkMonomorphicCall(vm, calleeFrame, *callLinkInfo, codeBlock, callee, codePtr); + switch (callLinkInfo->mode()) { + case CallLinkInfo::Mode::Init: { + if (!callLinkInfo->seenOnce()) + callLinkInfo->setSeen(); + else + linkMonomorphicCall(vm, owner, *callLinkInfo, codeBlock, callee, codePtr); + break; + } + case CallLinkInfo::Mode::Monomorphic: + case CallLinkInfo::Mode::Polymorphic: { +#if ENABLE(JIT) + if (kind == CodeForCall && 
callLinkInfo->allowStubs()) { + linkPolymorphicCall(vm, owner, calleeFrame, *callLinkInfo, CallVariant(callee)); + break; + } +#endif + callLinkInfo->setVirtualCall(vm); + break; + } + case CallLinkInfo::Mode::Virtual: + break; + } - return encodeResult(codePtr.taggedPtr(), reinterpret_cast(callLinkInfo->callMode() == CallMode::Tail ? ReuseTheFrame : KeepTheFrame)); + return codePtr.taggedPtr(); } -ALWAYS_INLINE UGPRPair virtualForWithFunction(JSGlobalObject* globalObject, CallFrame* calleeFrame, CallLinkInfo* callLinkInfo, JSCell*& calleeAsFunctionCell) +ALWAYS_INLINE void* virtualForWithFunction(VM& vm, JSCell* owner, CallFrame* calleeFrame, CallLinkInfo* callLinkInfo, JSCell*& calleeAsFunctionCell) { - CallFrame* callFrame = calleeFrame->callerFrame(); - VM& vm = globalObject->vm(); auto throwScope = DECLARE_THROW_SCOPE(vm); CodeSpecializationKind kind = callLinkInfo->specializationKind(); - NativeCallFrameTracer tracer(vm, callFrame); JSValue calleeAsValue = calleeFrame->guaranteedJSValueCallee(); calleeAsFunctionCell = getJSFunction(calleeAsValue); @@ -186,9 +240,9 @@ ALWAYS_INLINE UGPRPair virtualForWithFunction(JSGlobalObject* globalObject, Call if (jsDynamicCast(calleeAsValue)) { CodePtr codePtr = vm.getCTIInternalFunctionTrampolineFor(kind); ASSERT(!!codePtr); - return encodeResult(codePtr.taggedPtr(), reinterpret_cast(callLinkInfo->callMode() == CallMode::Tail ? ReuseTheFrame : KeepTheFrame)); + return codePtr.taggedPtr(); } - RELEASE_AND_RETURN(throwScope, handleHostCall(globalObject, calleeFrame, calleeAsValue, callLinkInfo)); + RELEASE_AND_RETURN(throwScope, handleHostCall(vm, owner, calleeFrame, calleeAsValue, callLinkInfo)); } JSFunction* function = jsCast(calleeAsFunctionCell); @@ -200,26 +254,20 @@ ALWAYS_INLINE UGPRPair virtualForWithFunction(JSGlobalObject* globalObject, Call if (!executable->isHostFunction()) { FunctionExecutable* functionExecutable = jsCast(executable); - auto handleThrowException = [&] () { - void* throwTarget = vm.getCTIThrowExceptionFromCallSlowPath().code().taggedPtr(); - return encodeResult(throwTarget, reinterpret_cast(KeepTheFrame)); - }; - if (!isCall(kind) && functionExecutable->constructAbility() == ConstructAbility::CannotConstruct) { - throwException(globalObject, throwScope, createNotAConstructorError(globalObject, function)); - return handleThrowException(); + auto* globalObject = callLinkInfo->globalObjectForSlowPath(owner); + calleeFrame->setCallee(globalObject->partiallyInitializedFrameCallee()); + RELEASE_AND_RETURN(throwScope, throwNotAConstructorErrorFromCallIC(globalObject, owner, function, callLinkInfo)); } CodeBlock** codeBlockSlot = calleeFrame->addressOfCodeBlock(); functionExecutable->prepareForExecution(vm, function, scope, kind, *codeBlockSlot); - RETURN_IF_EXCEPTION(throwScope, handleThrowException()); + RETURN_IF_EXCEPTION(throwScope, nullptr); } // FIXME: Support wasm IC. // https://bugs.webkit.org/show_bug.cgi?id=220339 - return encodeResult(executable->entrypointFor( - kind, MustCheckArity).taggedPtr(), - reinterpret_cast(callLinkInfo->callMode() == CallMode::Tail ? 
ReuseTheFrame : KeepTheFrame)); + return executable->entrypointFor(kind, MustCheckArity).taggedPtr(); } } // namespace JSC diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/SetPrivateBrandStatus.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/SetPrivateBrandStatus.cpp index da55fa24..c704b0ec 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/SetPrivateBrandStatus.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/SetPrivateBrandStatus.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021 Apple Inc. All rights reserved. + * Copyright (C) 2021-2023 Apple Inc. All rights reserved. * Copyright (C) 2021 Igalia S.A. All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -33,9 +33,12 @@ #include "InlineCacheCompiler.h" #include "StructureStubInfo.h" #include +#include namespace JSC { +WTF_MAKE_TZONE_ALLOCATED_IMPL(SetPrivateBrandStatus); + bool SetPrivateBrandStatus::appendVariant(const SetPrivateBrandVariant& variant) { return appendICStatusVariant(m_variants, variant); diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/SetPrivateBrandStatus.h b/vendor/webkit/Source/JavaScriptCore/bytecode/SetPrivateBrandStatus.h index 48ea67e4..8f451588 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/SetPrivateBrandStatus.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/SetPrivateBrandStatus.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021 Apple Inc. All rights reserved. + * Copyright (C) 2021-2023 Apple Inc. All rights reserved. * Copyright (C) 2021 Igalia S.A. All rights reserved. * * @@ -34,6 +34,7 @@ #include "ICStatusMap.h" #include "SetPrivateBrandVariant.h" #include "StubInfoSummary.h" +#include namespace JSC { @@ -43,7 +44,7 @@ class StructureSet; class StructureStubInfo; class SetPrivateBrandStatus final { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(SetPrivateBrandStatus); public: enum State : uint8_t { // It's uncached so we have no information. diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/SetPrivateBrandVariant.h b/vendor/webkit/Source/JavaScriptCore/bytecode/SetPrivateBrandVariant.h index 53686e6b..afb03488 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/SetPrivateBrandVariant.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/SetPrivateBrandVariant.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021 Apple Inc. All rights reserved. + * Copyright (C) 2021-2023 Apple Inc. All rights reserved. * Copyright (C) 2021 Igalia S.A. All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -29,13 +29,14 @@ #include "CacheableIdentifier.h" #include "SlotVisitorMacros.h" #include +#include namespace JSC { class SetPrivateBrandStatus; class SetPrivateBrandVariant { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(SetPrivateBrandVariant); public: SetPrivateBrandVariant(CacheableIdentifier, Structure* oldStructure, Structure* newStructure); diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp index 837b46a9..2756aee9 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2015-2016 Apple Inc. All rights reserved. + * Copyright (C) 2012-2023 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,9 +31,14 @@ #include "CodeBlock.h" #include "JSCellInlines.h" #include "StructureStubInfo.h" +#include namespace JSC { +WTF_MAKE_TZONE_ALLOCATED_IMPL(AdaptiveValueStructureStubClearingWatchpoint); +WTF_MAKE_TZONE_ALLOCATED_IMPL(StructureTransitionStructureStubClearingWatchpoint); +WTF_MAKE_TZONE_ALLOCATED_IMPL(WatchpointsOnStructureStubInfo); + void StructureTransitionStructureStubClearingWatchpoint::fireInternal(VM& vm, const FireDetail&) { if (!m_holder->isValid()) diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h b/vendor/webkit/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h index 952345b9..a7ee3f95 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2022 Apple Inc. All rights reserved. + * Copyright (C) 2012-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,8 +31,8 @@ #include "ObjectPropertyCondition.h" #include "Watchpoint.h" #include -#include #include +#include namespace JSC { @@ -42,7 +42,7 @@ class WatchpointsOnStructureStubInfo; class StructureTransitionStructureStubClearingWatchpoint final : public Watchpoint { WTF_MAKE_NONCOPYABLE(StructureTransitionStructureStubClearingWatchpoint); - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(StructureTransitionStructureStubClearingWatchpoint); public: StructureTransitionStructureStubClearingWatchpoint(const ObjectPropertyCondition& key, WatchpointsOnStructureStubInfo& holder) : Watchpoint(Watchpoint::Type::StructureTransitionStructureStubClearing) @@ -54,14 +54,14 @@ class StructureTransitionStructureStubClearingWatchpoint final : public Watchpoi void fireInternal(VM&, const FireDetail&); private: - PackedPtr m_holder; + WatchpointsOnStructureStubInfo* m_holder; ObjectPropertyCondition m_key; }; class AdaptiveValueStructureStubClearingWatchpoint final : public AdaptiveInferredPropertyValueWatchpointBase { using Base = AdaptiveInferredPropertyValueWatchpointBase; WTF_MAKE_NONCOPYABLE(AdaptiveValueStructureStubClearingWatchpoint); - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(AdaptiveValueStructureStubClearingWatchpoint); void handleFire(VM&, const FireDetail&) final; @@ -75,12 +75,12 @@ class AdaptiveValueStructureStubClearingWatchpoint final : public AdaptiveInferr private: - PackedPtr m_holder; + WatchpointsOnStructureStubInfo* m_holder; }; class WatchpointsOnStructureStubInfo final { WTF_MAKE_NONCOPYABLE(WatchpointsOnStructureStubInfo); - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(WatchpointsOnStructureStubInfo); public: WatchpointsOnStructureStubInfo(CodeBlock* codeBlock, StructureStubInfo* stubInfo) : m_codeBlock(codeBlock) diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp index 8a8dfb3d..eee6fd4e 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp @@ -42,12 +42,11 @@ static constexpr bool verbose = false; StructureStubInfo::~StructureStubInfo() = default; -void StructureStubInfo::initGetByIdSelf(const 
ConcurrentJSLockerBase& locker, CodeBlock* codeBlock, Structure* inlineAccessBaseStructure, PropertyOffset offset, CacheableIdentifier identifier) +void StructureStubInfo::initGetByIdSelf(const ConcurrentJSLockerBase& locker, CodeBlock* codeBlock, Structure* inlineAccessBaseStructure, PropertyOffset offset) { ASSERT(m_cacheType == CacheType::Unset); ASSERT(hasConstantIdentifier); setCacheType(locker, CacheType::GetByIdSelf); - m_identifier = identifier; m_inlineAccessBaseStructureID.set(codeBlock->vm(), codeBlock, inlineAccessBaseStructure); byIdSelfOffset = offset; } @@ -64,20 +63,20 @@ void StructureStubInfo::initStringLength(const ConcurrentJSLockerBase& locker) setCacheType(locker, CacheType::StringLength); } -void StructureStubInfo::initPutByIdReplace(const ConcurrentJSLockerBase& locker, CodeBlock* codeBlock, Structure* inlineAccessBaseStructure, PropertyOffset offset, CacheableIdentifier identifier) +void StructureStubInfo::initPutByIdReplace(const ConcurrentJSLockerBase& locker, CodeBlock* codeBlock, Structure* inlineAccessBaseStructure, PropertyOffset offset) { ASSERT(m_cacheType == CacheType::Unset); + ASSERT(hasConstantIdentifier); setCacheType(locker, CacheType::PutByIdReplace); - m_identifier = identifier; m_inlineAccessBaseStructureID.set(codeBlock->vm(), codeBlock, inlineAccessBaseStructure); byIdSelfOffset = offset; } -void StructureStubInfo::initInByIdSelf(const ConcurrentJSLockerBase& locker, CodeBlock* codeBlock, Structure* inlineAccessBaseStructure, PropertyOffset offset, CacheableIdentifier identifier) +void StructureStubInfo::initInByIdSelf(const ConcurrentJSLockerBase& locker, CodeBlock* codeBlock, Structure* inlineAccessBaseStructure, PropertyOffset offset) { ASSERT(m_cacheType == CacheType::Unset); + ASSERT(hasConstantIdentifier); setCacheType(locker, CacheType::InByIdSelf); - m_identifier = identifier; m_inlineAccessBaseStructureID.set(codeBlock->vm(), codeBlock, inlineAccessBaseStructure); byIdSelfOffset = offset; } @@ -102,20 +101,10 @@ void StructureStubInfo::deref() void StructureStubInfo::aboutToDie() { - switch (m_cacheType) { - case CacheType::Stub: - m_stub->aboutToDie(); - return; - case CacheType::Unset: - case CacheType::GetByIdSelf: - case CacheType::PutByIdReplace: - case CacheType::InByIdSelf: - case CacheType::ArrayLength: - case CacheType::StringLength: + if (m_cacheType != CacheType::Stub) return; - } - - RELEASE_ASSERT_NOT_REACHED(); + if (m_handler) + m_handler->aboutToDie(); } AccessGenerationResult StructureStubInfo::addAccessCase( @@ -198,7 +187,7 @@ AccessGenerationResult StructureStubInfo::addAccessCase( // PolymorphicAccess. clearBufferedStructures(); - InlineCacheCompiler compiler(vm, globalObject, ecmaMode, *this); + InlineCacheCompiler compiler(codeBlock->jitType(), vm, globalObject, ecmaMode, *this); result = compiler.regenerate(locker, *m_stub, codeBlock); if (StructureStubInfoInternal::verbose) @@ -216,12 +205,13 @@ AccessGenerationResult StructureStubInfo::addAccessCase( // m_inlineAccessBaseStructureID. The reason we don't clear m_inlineAccessBaseStructureID while // we're buffered is because we rely on it to reset during GC if m_inlineAccessBaseStructureID // is collected. - m_identifier = nullptr; m_inlineAccessBaseStructureID.clear(); // If we generated some code then we don't want to attempt to repatch in the future until we // gather enough cases. 
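A pattern worth naming in the StructureStubInfo changes above and below: the CacheableIdentifier parameter disappears from initGetByIdSelf/initPutByIdReplace/initInByIdSelf, and addAccessCase stops nulling m_identifier, because the identifier is now installed once from the unlinked stub info (m_identifier = unlinkedStubInfo.m_identifier, further down) instead of being re-stowed and re-cleared on every cache transition. Moving a field to one-time initialization like this removes a class of stale-or-cleared-field bugs; a minimal illustration with invented types:

#include <string>

struct UnlinkedStub { std::string identifier; };

class Stub {
public:
    // The identifier is fixed at link time; cache-state changes never touch it.
    explicit Stub(const UnlinkedStub& unlinked)
        : m_identifier(unlinked.identifier) { }

    void initSelfCache(int offset) { m_offset = offset; } // no identifier parameter
    void reset() { m_offset = -1; }                       // identifier survives resets

private:
    std::string m_identifier;
    int m_offset { -1 };
};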
bufferingCountdown = Options::repatchBufferingCountdown(); + m_handler = result.handler(); + m_codePtr = m_handler->callTarget(); return result; })(); vm.writeBarrier(codeBlock); @@ -231,7 +221,6 @@ AccessGenerationResult StructureStubInfo::addAccessCase( void StructureStubInfo::reset(const ConcurrentJSLockerBase& locker, CodeBlock* codeBlock) { clearBufferedStructures(); - m_identifier = nullptr; m_inlineAccessBaseStructureID.clear(); if (m_cacheType == CacheType::Unset) @@ -319,11 +308,17 @@ void StructureStubInfo::reset(const ConcurrentJSLockerBase& locker, CodeBlock* c case AccessType::InstanceOf: resetInstanceOf(codeBlock, *this); break; - case AccessType::DeleteByID: - resetDelBy(codeBlock, *this, DelByKind::ById); + case AccessType::DeleteByIdStrict: + resetDelBy(codeBlock, *this, DelByKind::ByIdStrict); break; - case AccessType::DeleteByVal: - resetDelBy(codeBlock, *this, DelByKind::ByVal); + case AccessType::DeleteByIdSloppy: + resetDelBy(codeBlock, *this, DelByKind::ByIdSloppy); + break; + case AccessType::DeleteByValStrict: + resetDelBy(codeBlock, *this, DelByKind::ByValStrict); + break; + case AccessType::DeleteByValSloppy: + resetDelBy(codeBlock, *this, DelByKind::ByValSloppy); break; case AccessType::CheckPrivateBrand: resetCheckPrivateBrand(codeBlock, *this); @@ -379,8 +374,12 @@ void StructureStubInfo::visitWeakReferences(const ConcurrentJSLockerBase& locker bool isValid = true; if (Structure* structure = inlineAccessBaseStructure()) isValid &= vm.heap.isMarked(structure); - if (m_cacheType == CacheType::Stub) - isValid &= m_stub->visitWeak(vm); + if (m_cacheType == CacheType::Stub) { + if (m_stub) + isValid &= m_stub->visitWeak(vm); + if (m_handler) + isValid &= m_handler->visitWeak(vm); + } if (isValid) return; @@ -422,6 +421,8 @@ StubInfoSummary StructureStubInfo::summary(VM& vm) const case AccessCase::IndexedMegamorphicLoad: case AccessCase::StoreMegamorphic: case AccessCase::IndexedMegamorphicStore: + case AccessCase::InMegamorphic: + case AccessCase::IndexedMegamorphicIn: return StubInfoSummary::Megamorphic; default: break; @@ -450,7 +451,9 @@ bool StructureStubInfo::containsPC(void* pc) const { if (m_cacheType != CacheType::Stub) return false; - return m_stub->containsPC(pc); + if (!m_handler) + return false; + return m_handler->containsPC(pc); } ALWAYS_INLINE void StructureStubInfo::setCacheType(const ConcurrentJSLockerBase&, CacheType newCacheType) @@ -461,10 +464,14 @@ ALWAYS_INLINE void StructureStubInfo::setCacheType(const ConcurrentJSLockerBase& static CodePtr slowOperationFromUnlinkedStructureStubInfo(const UnlinkedStructureStubInfo& unlinkedStubInfo) { switch (unlinkedStubInfo.accessType) { - case AccessType::DeleteByVal: - return operationDeleteByValOptimize; - case AccessType::DeleteByID: - return operationDeleteByIdOptimize; + case AccessType::DeleteByValStrict: + return operationDeleteByValStrictOptimize; + case AccessType::DeleteByValSloppy: + return operationDeleteByValSloppyOptimize; + case AccessType::DeleteByIdStrict: + return operationDeleteByIdStrictOptimize; + case AccessType::DeleteByIdSloppy: + return operationDeleteByIdSloppyOptimize; case AccessType::GetByVal: return operationGetByValOptimize; case AccessType::InstanceOf: @@ -523,14 +530,22 @@ static CodePtr slowOperationFromUnlinkedStructureStubInfo(const return { }; } -void StructureStubInfo::initializeFromUnlinkedStructureStubInfo(const BaselineUnlinkedStructureStubInfo& unlinkedStubInfo) +void StructureStubInfo::initializeFromUnlinkedStructureStubInfo(VM& vm, const 
BaselineUnlinkedStructureStubInfo& unlinkedStubInfo) { + ASSERT(!isCompilationThread()); accessType = unlinkedStubInfo.accessType; doneLocation = unlinkedStubInfo.doneLocation; - slowPathStartLocation = unlinkedStubInfo.slowPathStartLocation; + m_identifier = unlinkedStubInfo.m_identifier; callSiteIndex = CallSiteIndex(BytecodeIndex(unlinkedStubInfo.bytecodeIndex.offset())); codeOrigin = CodeOrigin(unlinkedStubInfo.bytecodeIndex); - m_codePtr = slowPathStartLocation; + if (Options::useHandlerIC()) { + m_handler = InlineCacheCompiler::generateSlowPathHandler(vm, accessType); + m_codePtr = m_handler->callTarget(); + } else { + m_handler = InlineCacheHandler::createNonHandlerSlowPath(unlinkedStubInfo.slowPathStartLocation); + m_codePtr = m_handler->callTarget(); + slowPathStartLocation = unlinkedStubInfo.slowPathStartLocation; + } propertyIsInt32 = unlinkedStubInfo.propertyIsInt32; canBeMegamorphic = unlinkedStubInfo.canBeMegamorphic; isEnumerator = unlinkedStubInfo.isEnumerator; @@ -539,38 +554,35 @@ void StructureStubInfo::initializeFromUnlinkedStructureStubInfo(const BaselineUn if (unlinkedStubInfo.canBeMegamorphic) bufferingCountdown = 1; - auto usedJSRs = RegisterSetBuilder::stubUnavailableRegisters(); - if (accessType == AccessType::GetById && unlinkedStubInfo.bytecodeIndex.checkpoint()) { - // For iterator_next, we can't clobber the "dontClobberJSR" register either. - usedJSRs.add(BaselineJITRegisters::GetById::FastPath::dontClobberJSR, IgnoreVectors); - } - usedRegisters = usedJSRs.buildScalarRegisterSet(); + usedRegisters = RegisterSetBuilder::stubUnavailableRegisters().buildScalarRegisterSet(); m_slowOperation = slowOperationFromUnlinkedStructureStubInfo(unlinkedStubInfo); switch (accessType) { - case AccessType::DeleteByVal: + case AccessType::DeleteByValStrict: + case AccessType::DeleteByValSloppy: hasConstantIdentifier = false; m_baseGPR = BaselineJITRegisters::DelByVal::baseJSR.payloadGPR(); m_extraGPR = BaselineJITRegisters::DelByVal::propertyJSR.payloadGPR(); - m_valueGPR = BaselineJITRegisters::DelByVal::FastPath::resultJSR.payloadGPR(); - m_stubInfoGPR = BaselineJITRegisters::DelByVal::FastPath::stubInfoGPR; + m_valueGPR = BaselineJITRegisters::DelByVal::resultJSR.payloadGPR(); + m_stubInfoGPR = BaselineJITRegisters::DelByVal::stubInfoGPR; #if USE(JSVALUE32_64) m_baseTagGPR = BaselineJITRegisters::DelByVal::baseJSR.tagGPR(); m_extraTagGPR = BaselineJITRegisters::DelByVal::propertyJSR.tagGPR(); - m_valueTagGPR = BaselineJITRegisters::DelByVal::FastPath::resultJSR.tagGPR(); + m_valueTagGPR = BaselineJITRegisters::DelByVal::resultJSR.tagGPR(); #endif break; - case AccessType::DeleteByID: + case AccessType::DeleteByIdStrict: + case AccessType::DeleteByIdSloppy: hasConstantIdentifier = true; m_baseGPR = BaselineJITRegisters::DelById::baseJSR.payloadGPR(); m_extraGPR = InvalidGPRReg; - m_valueGPR = BaselineJITRegisters::DelById::FastPath::resultJSR.payloadGPR(); - m_stubInfoGPR = BaselineJITRegisters::DelById::FastPath::stubInfoGPR; + m_valueGPR = BaselineJITRegisters::DelById::resultJSR.payloadGPR(); + m_stubInfoGPR = BaselineJITRegisters::DelById::stubInfoGPR; #if USE(JSVALUE32_64) m_baseTagGPR = BaselineJITRegisters::DelById::baseJSR.tagGPR(); m_extraTagGPR = InvalidGPRReg; - m_valueTagGPR = BaselineJITRegisters::DelById::FastPath::resultJSR.tagGPR(); + m_valueTagGPR = BaselineJITRegisters::DelById::resultJSR.tagGPR(); #endif break; case AccessType::GetByVal: @@ -579,7 +591,9 @@ void StructureStubInfo::initializeFromUnlinkedStructureStubInfo(const BaselineUn m_baseGPR = 
BaselineJITRegisters::GetByVal::baseJSR.payloadGPR(); m_extraGPR = BaselineJITRegisters::GetByVal::propertyJSR.payloadGPR(); m_valueGPR = BaselineJITRegisters::GetByVal::resultJSR.payloadGPR(); - m_stubInfoGPR = BaselineJITRegisters::GetByVal::FastPath::stubInfoGPR; + m_stubInfoGPR = BaselineJITRegisters::GetByVal::stubInfoGPR; + if (accessType == AccessType::GetByVal) + m_arrayProfileGPR = BaselineJITRegisters::GetByVal::profileGPR; #if USE(JSVALUE32_64) m_baseTagGPR = BaselineJITRegisters::GetByVal::baseJSR.tagGPR(); m_extraTagGPR = BaselineJITRegisters::GetByVal::propertyJSR.tagGPR(); @@ -592,7 +606,7 @@ void StructureStubInfo::initializeFromUnlinkedStructureStubInfo(const BaselineUn m_baseGPR = BaselineJITRegisters::Instanceof::valueJSR.payloadGPR(); m_valueGPR = BaselineJITRegisters::Instanceof::resultJSR.payloadGPR(); m_extraGPR = BaselineJITRegisters::Instanceof::protoJSR.payloadGPR(); - m_stubInfoGPR = BaselineJITRegisters::Instanceof::FastPath::stubInfoGPR; + m_stubInfoGPR = BaselineJITRegisters::Instanceof::stubInfoGPR; #if USE(JSVALUE32_64) m_baseTagGPR = BaselineJITRegisters::Instanceof::valueJSR.tagGPR(); m_valueTagGPR = InvalidGPRReg; @@ -607,6 +621,8 @@ void StructureStubInfo::initializeFromUnlinkedStructureStubInfo(const BaselineUn m_extraGPR = BaselineJITRegisters::InByVal::propertyJSR.payloadGPR(); m_valueGPR = BaselineJITRegisters::InByVal::resultJSR.payloadGPR(); m_stubInfoGPR = BaselineJITRegisters::InByVal::stubInfoGPR; + if (accessType == AccessType::InByVal) + m_arrayProfileGPR = BaselineJITRegisters::InByVal::profileGPR; #if USE(JSVALUE32_64) m_baseTagGPR = BaselineJITRegisters::InByVal::baseJSR.tagGPR(); m_extraTagGPR = BaselineJITRegisters::InByVal::propertyJSR.tagGPR(); @@ -633,7 +649,7 @@ void StructureStubInfo::initializeFromUnlinkedStructureStubInfo(const BaselineUn m_extraGPR = InvalidGPRReg; m_baseGPR = BaselineJITRegisters::GetById::baseJSR.payloadGPR(); m_valueGPR = BaselineJITRegisters::GetById::resultJSR.payloadGPR(); - m_stubInfoGPR = BaselineJITRegisters::GetById::FastPath::stubInfoGPR; + m_stubInfoGPR = BaselineJITRegisters::GetById::stubInfoGPR; #if USE(JSVALUE32_64) m_extraTagGPR = InvalidGPRReg; m_baseTagGPR = BaselineJITRegisters::GetById::baseJSR.tagGPR(); @@ -645,7 +661,7 @@ void StructureStubInfo::initializeFromUnlinkedStructureStubInfo(const BaselineUn m_baseGPR = BaselineJITRegisters::GetByIdWithThis::baseJSR.payloadGPR(); m_valueGPR = BaselineJITRegisters::GetByIdWithThis::resultJSR.payloadGPR(); m_extraGPR = BaselineJITRegisters::GetByIdWithThis::thisJSR.payloadGPR(); - m_stubInfoGPR = BaselineJITRegisters::GetByIdWithThis::FastPath::stubInfoGPR; + m_stubInfoGPR = BaselineJITRegisters::GetByIdWithThis::stubInfoGPR; #if USE(JSVALUE32_64) m_baseTagGPR = BaselineJITRegisters::GetByIdWithThis::baseJSR.tagGPR(); m_valueTagGPR = BaselineJITRegisters::GetByIdWithThis::resultJSR.tagGPR(); @@ -659,7 +675,8 @@ void StructureStubInfo::initializeFromUnlinkedStructureStubInfo(const BaselineUn m_valueGPR = BaselineJITRegisters::GetByValWithThis::resultJSR.payloadGPR(); m_extraGPR = BaselineJITRegisters::GetByValWithThis::thisJSR.payloadGPR(); m_extra2GPR = BaselineJITRegisters::GetByValWithThis::propertyJSR.payloadGPR(); - m_stubInfoGPR = BaselineJITRegisters::GetByValWithThis::FastPath::stubInfoGPR; + m_stubInfoGPR = BaselineJITRegisters::GetByValWithThis::stubInfoGPR; + m_arrayProfileGPR = BaselineJITRegisters::GetByValWithThis::profileGPR; #else // Registers are exhausted, we cannot have this IC on 32bit. 
RELEASE_ASSERT_NOT_REACHED(); @@ -675,7 +692,7 @@ void StructureStubInfo::initializeFromUnlinkedStructureStubInfo(const BaselineUn m_extraGPR = InvalidGPRReg; m_baseGPR = BaselineJITRegisters::PutById::baseJSR.payloadGPR(); m_valueGPR = BaselineJITRegisters::PutById::valueJSR.payloadGPR(); - m_stubInfoGPR = BaselineJITRegisters::PutById::FastPath::stubInfoGPR; + m_stubInfoGPR = BaselineJITRegisters::PutById::stubInfoGPR; #if USE(JSVALUE32_64) m_extraTagGPR = InvalidGPRReg; m_baseTagGPR = BaselineJITRegisters::PutById::baseJSR.tagGPR(); @@ -706,12 +723,12 @@ void StructureStubInfo::initializeFromUnlinkedStructureStubInfo(const BaselineUn hasConstantIdentifier = false; m_valueGPR = InvalidGPRReg; m_baseGPR = BaselineJITRegisters::PrivateBrand::baseJSR.payloadGPR(); - m_extraGPR = BaselineJITRegisters::PrivateBrand::brandJSR.payloadGPR(); - m_stubInfoGPR = BaselineJITRegisters::PrivateBrand::FastPath::stubInfoGPR; + m_extraGPR = BaselineJITRegisters::PrivateBrand::propertyJSR.payloadGPR(); + m_stubInfoGPR = BaselineJITRegisters::PrivateBrand::stubInfoGPR; #if USE(JSVALUE32_64) m_valueTagGPR = InvalidGPRReg; m_baseTagGPR = BaselineJITRegisters::PrivateBrand::baseJSR.tagGPR(); - m_extraTagGPR = BaselineJITRegisters::PrivateBrand::brandJSR.tagGPR(); + m_extraTagGPR = BaselineJITRegisters::PrivateBrand::propertyJSR.tagGPR(); #endif break; } @@ -720,12 +737,15 @@ void StructureStubInfo::initializeFromUnlinkedStructureStubInfo(const BaselineUn #if ENABLE(DFG_JIT) void StructureStubInfo::initializeFromDFGUnlinkedStructureStubInfo(const DFG::UnlinkedStructureStubInfo& unlinkedStubInfo) { + ASSERT(!isCompilationThread()); accessType = unlinkedStubInfo.accessType; doneLocation = unlinkedStubInfo.doneLocation; - slowPathStartLocation = unlinkedStubInfo.slowPathStartLocation; + m_identifier = unlinkedStubInfo.m_identifier; callSiteIndex = unlinkedStubInfo.callSiteIndex; codeOrigin = unlinkedStubInfo.codeOrigin; - m_codePtr = slowPathStartLocation; + m_handler = InlineCacheHandler::createNonHandlerSlowPath(unlinkedStubInfo.slowPathStartLocation); + m_codePtr = m_handler->callTarget(); + slowPathStartLocation = unlinkedStubInfo.slowPathStartLocation; propertyIsInt32 = unlinkedStubInfo.propertyIsInt32; propertyIsSymbol = unlinkedStubInfo.propertyIsSymbol; @@ -768,6 +788,54 @@ void StructureStubInfo::checkConsistency() } #endif // ASSERT_ENABLED +RefPtr SharedJITStubSet::getMegamorphic(AccessType type) const +{ + switch (type) { + case AccessType::GetByVal: + return m_getByValMegamorphic; + case AccessType::GetByValWithThis: + return m_getByValWithThisMegamorphic; + case AccessType::PutByValStrict: + case AccessType::PutByValSloppy: + return m_putByValMegamorphic; + case AccessType::InByVal: + return m_inByValMegamorphic; + default: + return nullptr; + } +} + +void SharedJITStubSet::setMegamorphic(AccessType type, Ref stub) +{ + switch (type) { + case AccessType::GetByVal: + m_getByValMegamorphic = WTFMove(stub); + break; + case AccessType::GetByValWithThis: + m_getByValWithThisMegamorphic = WTFMove(stub); + break; + case AccessType::PutByValStrict: + case AccessType::PutByValSloppy: + m_putByValMegamorphic = WTFMove(stub); + break; + case AccessType::InByVal: + m_inByValMegamorphic = WTFMove(stub); + break; + default: + break; + } +} + +RefPtr SharedJITStubSet::getSlowPathHandler(AccessType type) const +{ + return m_slowPathHandlers[static_cast(type)]; +} + +void SharedJITStubSet::setSlowPathHandler(AccessType type, Ref handler) +{ + m_slowPathHandlers[static_cast(type)] = WTFMove(handler); +} + #endif // 
ENABLE(JIT) } // namespace JSC diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/StructureStubInfo.h b/vendor/webkit/Source/JavaScriptCore/bytecode/StructureStubInfo.h index 2c5e8dba..f1418a6e 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/StructureStubInfo.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/StructureStubInfo.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008-2021 Apple Inc. All rights reserved. + * Copyright (C) 2008-2023 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -40,6 +40,7 @@ #include "StubInfoSummary.h" #include #include +#include namespace JSC { @@ -53,38 +54,50 @@ class AccessCase; class AccessGenerationResult; class PolymorphicAccess; +#define JSC_FOR_EACH_STRUCTURE_STUB_INFO_ACCESS_TYPE(macro) \ + macro(GetById) \ + macro(GetByIdWithThis) \ + macro(GetByIdDirect) \ + macro(TryGetById) \ + macro(GetByVal) \ + macro(GetByValWithThis) \ + macro(PutByIdStrict) \ + macro(PutByIdSloppy) \ + macro(PutByIdDirectStrict) \ + macro(PutByIdDirectSloppy) \ + macro(PutByValStrict) \ + macro(PutByValSloppy) \ + macro(PutByValDirectStrict) \ + macro(PutByValDirectSloppy) \ + macro(DefinePrivateNameByVal) \ + macro(DefinePrivateNameById) \ + macro(SetPrivateNameByVal) \ + macro(SetPrivateNameById) \ + macro(InById) \ + macro(InByVal) \ + macro(HasPrivateName) \ + macro(HasPrivateBrand) \ + macro(InstanceOf) \ + macro(DeleteByIdStrict) \ + macro(DeleteByIdSloppy) \ + macro(DeleteByValStrict) \ + macro(DeleteByValSloppy) \ + macro(GetPrivateName) \ + macro(GetPrivateNameById) \ + macro(CheckPrivateBrand) \ + macro(SetPrivateBrand) \ + + enum class AccessType : int8_t { - GetById, - GetByIdWithThis, - GetByIdDirect, - TryGetById, - GetByVal, - GetByValWithThis, - PutByIdStrict, - PutByIdSloppy, - PutByIdDirectStrict, - PutByIdDirectSloppy, - PutByValStrict, - PutByValSloppy, - PutByValDirectStrict, - PutByValDirectSloppy, - DefinePrivateNameByVal, - DefinePrivateNameById, - SetPrivateNameByVal, - SetPrivateNameById, - InById, - InByVal, - HasPrivateName, - HasPrivateBrand, - InstanceOf, - DeleteByID, - DeleteByVal, - GetPrivateName, - GetPrivateNameById, - CheckPrivateBrand, - SetPrivateBrand, +#define JSC_DEFINE_ACCESS_TYPE(name) name, + JSC_FOR_EACH_STRUCTURE_STUB_INFO_ACCESS_TYPE(JSC_DEFINE_ACCESS_TYPE) +#undef JSC_DEFINE_ACCESS_TYPE }; +#define JSC_INCREMENT_ACCESS_TYPE(name) + 1 +static constexpr unsigned numberOfAccessTypes = 0 JSC_FOR_EACH_STRUCTURE_STUB_INFO_ACCESS_TYPE(JSC_INCREMENT_ACCESS_TYPE); +#undef JSC_INCREMENT_ACCESS_TYPE + enum class CacheType : int8_t { Unset, GetByIdSelf, @@ -100,7 +113,7 @@ struct BaselineUnlinkedStructureStubInfo; class StructureStubInfo { WTF_MAKE_NONCOPYABLE(StructureStubInfo); - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(StructureStubInfo); public: StructureStubInfo(AccessType accessType, CodeOrigin codeOrigin) : codeOrigin(codeOrigin) @@ -115,11 +128,11 @@ class StructureStubInfo { ~StructureStubInfo(); - void initGetByIdSelf(const ConcurrentJSLockerBase&, CodeBlock*, Structure* inlineAccessBaseStructure, PropertyOffset, CacheableIdentifier); + void initGetByIdSelf(const ConcurrentJSLockerBase&, CodeBlock*, Structure* inlineAccessBaseStructure, PropertyOffset); void initArrayLength(const ConcurrentJSLockerBase&); void initStringLength(const ConcurrentJSLockerBase&); - void initPutByIdReplace(const ConcurrentJSLockerBase&, CodeBlock*, Structure* inlineAccessBaseStructure, PropertyOffset, 
CacheableIdentifier); - void initInByIdSelf(const ConcurrentJSLockerBase&, CodeBlock*, Structure* inlineAccessBaseStructure, PropertyOffset, CacheableIdentifier); + void initPutByIdReplace(const ConcurrentJSLockerBase&, CodeBlock*, Structure* inlineAccessBaseStructure, PropertyOffset); + void initInByIdSelf(const ConcurrentJSLockerBase&, CodeBlock*, Structure* inlineAccessBaseStructure, PropertyOffset); AccessGenerationResult addAccessCase(const GCSafeConcurrentJSLocker&, JSGlobalObject*, CodeBlock*, ECMAMode, CacheableIdentifier, RefPtr); @@ -128,7 +141,7 @@ class StructureStubInfo { void deref(); void aboutToDie(); - void initializeFromUnlinkedStructureStubInfo(const BaselineUnlinkedStructureStubInfo&); + void initializeFromUnlinkedStructureStubInfo(VM&, const BaselineUnlinkedStructureStubInfo&); void initializeFromDFGUnlinkedStructureStubInfo(const DFG::UnlinkedStructureStubInfo&); DECLARE_VISIT_AGGREGATE; @@ -144,22 +157,7 @@ class StructureStubInfo { static StubInfoSummary summary(VM&, const StructureStubInfo*); - CacheableIdentifier identifier() - { - switch (m_cacheType) { - case CacheType::Unset: - case CacheType::ArrayLength: - case CacheType::StringLength: - case CacheType::Stub: - RELEASE_ASSERT_NOT_REACHED(); - break; - case CacheType::PutByIdReplace: - case CacheType::InByIdSelf: - case CacheType::GetByIdSelf: - break; - } - return m_identifier; - } + CacheableIdentifier identifier() const { return m_identifier; } bool containsPC(void* pc) const; @@ -375,6 +373,8 @@ class StructureStubInfo { static ptrdiff_t offsetOfSlowPathStartLocation() { return OBJECT_OFFSETOF(StructureStubInfo, slowPathStartLocation); } static ptrdiff_t offsetOfSlowOperation() { return OBJECT_OFFSETOF(StructureStubInfo, m_slowOperation); } static ptrdiff_t offsetOfCountdown() { return OBJECT_OFFSETOF(StructureStubInfo, countdown); } + static ptrdiff_t offsetOfCallSiteIndex() { return OBJECT_OFFSETOF(StructureStubInfo, callSiteIndex); } + static ptrdiff_t offsetOfHandler() { return OBJECT_OFFSETOF(StructureStubInfo, m_handler); } GPRReg thisGPR() const { return m_extraGPR; } GPRReg prototypeGPR() const { return m_extraGPR; } @@ -406,15 +406,7 @@ class StructureStubInfo { CodeOrigin codeOrigin { }; PropertyOffset byIdSelfOffset; WriteBarrierStructureID m_inlineAccessBaseStructureID; - std::unique_ptr m_stub; -private: CacheableIdentifier m_identifier; - // Represents those structures that already have buffered AccessCases in the PolymorphicAccess. - // Note that it's always safe to clear this. If we clear it prematurely, then if we see the same - // structure again during this buffering countdown, we will create an AccessCase object for it. - // That's not so bad - we'll get rid of the redundant ones once we regenerate. - HashSet m_bufferedStructures WTF_GUARDED_BY_LOCK(m_bufferedStructuresLock); -public: // This is either the start of the inline IC for *byId caches. or the location of patchable jump for 'instanceof' caches. // If useDataIC is true, then it is nullptr. CodeLocationLabel startLocation; @@ -427,6 +419,15 @@ class StructureStubInfo { }; CodePtr m_codePtr; + std::unique_ptr m_stub; + RefPtr m_handler; +private: + // Represents those structures that already have buffered AccessCases in the PolymorphicAccess. + // Note that it's always safe to clear this. If we clear it prematurely, then if we see the same + // structure again during this buffering countdown, we will create an AccessCase object for it. + // That's not so bad - we'll get rid of the redundant ones once we regenerate. 
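The StructureStubInfo.h hunk above replaces a defensive switch in identifier() with a trivial const getter: once m_identifier is copied from the unlinked stub info during initialization, it is populated for every cache type, so the per-CacheType RELEASE_ASSERT is no longer needed. A before/after sketch under that assumption, with stand-in types:

#include <cassert>

// Sketch only: CacheableIdentifierSketch stands in for JSC::CacheableIdentifier.
struct CacheableIdentifierSketch { const char* name { nullptr }; };

enum class CacheTypeSketch { Unset, GetByIdSelf, Stub };

struct StubInfoBefore {
    CacheTypeSketch m_cacheType { CacheTypeSketch::Unset };
    CacheableIdentifierSketch m_identifier;

    // Old shape: only some cache types were known to carry an identifier,
    // so the getter asserted on the rest.
    CacheableIdentifierSketch identifier()
    {
        assert(m_cacheType != CacheTypeSketch::Unset && m_cacheType != CacheTypeSketch::Stub);
        return m_identifier;
    }
};

struct StubInfoAfter {
    CacheableIdentifierSketch m_identifier;

    // New shape: m_identifier is filled in unconditionally up front,
    // so the accessor is unconditional and const.
    CacheableIdentifierSketch identifier() const { return m_identifier; }
};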
+ HashSet m_bufferedStructures WTF_GUARDED_BY_LOCK(m_bufferedStructuresLock); +public: ScalarRegisterSet usedRegisters; @@ -547,14 +548,170 @@ struct UnlinkedStructureStubInfo { bool prototypeIsKnownObject : 1 { false }; bool canBeMegamorphic : 1 { false }; bool isEnumerator : 1 { false }; + CacheableIdentifier m_identifier; // This only comes from already marked one. Thus, we do not mark it via GC. CodeLocationLabel doneLocation; CodeLocationLabel slowPathStartLocation; }; -struct BaselineUnlinkedStructureStubInfo : UnlinkedStructureStubInfo { +struct BaselineUnlinkedStructureStubInfo : JSC::UnlinkedStructureStubInfo { BytecodeIndex bytecodeIndex; }; +class SharedJITStubSet { + WTF_MAKE_FAST_ALLOCATED(SharedJITStubSet); +public: + SharedJITStubSet() = default; + + struct Hash { + struct Key { + Key() = default; + + Key(GPRReg baseGPR, GPRReg valueGPR, GPRReg extraGPR, GPRReg extra2GPR, GPRReg stubInfoGPR, GPRReg arrayProfileGPR, ScalarRegisterSet usedRegisters, PolymorphicAccessJITStubRoutine* wrapped) + : m_wrapped(wrapped) + , m_baseGPR(baseGPR) + , m_valueGPR(valueGPR) + , m_extraGPR(extraGPR) + , m_extra2GPR(extra2GPR) + , m_stubInfoGPR(stubInfoGPR) + , m_arrayProfileGPR(arrayProfileGPR) + , m_usedRegisters(usedRegisters) + { } + + Key(WTF::HashTableDeletedValueType) + : m_wrapped(bitwise_cast(static_cast(1))) + { } + + bool isHashTableDeletedValue() const { return m_wrapped == bitwise_cast(static_cast(1)); } + + friend bool operator==(const Key&, const Key&) = default; + + PolymorphicAccessJITStubRoutine* m_wrapped { nullptr }; + GPRReg m_baseGPR; + GPRReg m_valueGPR; + GPRReg m_extraGPR; + GPRReg m_extra2GPR; + GPRReg m_stubInfoGPR; + GPRReg m_arrayProfileGPR; + ScalarRegisterSet m_usedRegisters; + }; + + using KeyTraits = SimpleClassHashTraits; + + static unsigned hash(const Key& p) + { + if (!p.m_wrapped) + return 1; + return p.m_wrapped->hash(); + } + + static bool equal(const Key& a, const Key& b) + { + return a == b; + } + + static constexpr bool safeToCompareToEmptyOrDeleted = false; + }; + + struct Searcher { + struct Translator { + static unsigned hash(const Searcher& searcher) + { + return PolymorphicAccessJITStubRoutine::computeHash(searcher.m_cases, searcher.m_weakStructures); + } + + static bool equal(const Hash::Key a, const Searcher& b) + { + if (a.m_baseGPR == b.m_baseGPR + && a.m_valueGPR == b.m_valueGPR + && a.m_extraGPR == b.m_extraGPR + && a.m_extra2GPR == b.m_extra2GPR + && a.m_stubInfoGPR == b.m_stubInfoGPR + && a.m_arrayProfileGPR == b.m_arrayProfileGPR + && a.m_usedRegisters == b.m_usedRegisters) { + // FIXME: The ordering of cases does not matter for sharing capabilities. + // We can potentially increase success rate by making this comparison / hashing non ordering sensitive. 
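SharedJITStubSet::find, defined a little further down, probes the stub table with a lightweight Searcher through WTF's HashSet translator mechanism, so no heavyweight Key has to be constructed just to look something up. The analogous facility in standard C++20 is heterogeneous lookup with transparent hash/equality; the sketch below uses that stand-in (std containers and a string key, not WebKit's API) to show the shape of the technique:

#include <cstddef>
#include <string>
#include <string_view>
#include <unordered_set>

// Owned entry; heavyweight in the real thing (cases, weak structures, GPRs).
struct Entry { std::string cases; };

struct TransparentHash {
    using is_transparent = void;
    std::size_t operator()(const Entry& e) const { return std::hash<std::string>{}(e.cases); }
    std::size_t operator()(std::string_view searcher) const { return std::hash<std::string_view>{}(searcher); }
};

struct TransparentEq {
    using is_transparent = void;
    bool operator()(const Entry& a, const Entry& b) const { return a.cases == b.cases; }
    bool operator()(const Entry& a, std::string_view b) const { return a.cases == b; }
    bool operator()(std::string_view a, const Entry& b) const { return a == b.cases; }
};

int main()
{
    std::unordered_set<Entry, TransparentHash, TransparentEq> stubs;
    stubs.insert(Entry { "get_by_val/shape-A" });
    // Probe with a cheap string_view "searcher": no Entry is built for the lookup.
    auto it = stubs.find(std::string_view { "get_by_val/shape-A" });
    return it != stubs.end() ? 0 : 1;
}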
+ const auto& aCases = a.m_wrapped->cases(); + const auto& bCases = b.m_cases; + if (aCases.size() != bCases.size()) + return false; + for (unsigned index = 0; index < bCases.size(); ++index) { + if (!AccessCase::canBeShared(*aCases[index], *bCases[index])) + return false; + } + const auto& aWeak = a.m_wrapped->weakStructures(); + const auto& bWeak = b.m_weakStructures; + if (aWeak.size() != bWeak.size()) + return false; + for (unsigned i = 0, size = aWeak.size(); i < size; ++i) { + if (aWeak[i] != bWeak[i]) + return false; + } + return true; + } + return false; + } + }; + + GPRReg m_baseGPR; + GPRReg m_valueGPR; + GPRReg m_extraGPR; + GPRReg m_extra2GPR; + GPRReg m_stubInfoGPR; + GPRReg m_arrayProfileGPR; + ScalarRegisterSet m_usedRegisters; + const FixedVector>& m_cases; + const FixedVector& m_weakStructures; + }; + + struct PointerTranslator { + static unsigned hash(const PolymorphicAccessJITStubRoutine* stub) + { + return stub->hash(); + } + + static bool equal(const Hash::Key& key, const PolymorphicAccessJITStubRoutine* stub) + { + return key.m_wrapped == stub; + } + }; + + void add(Hash::Key&& key) + { + m_stubs.add(WTFMove(key)); + } + + void remove(PolymorphicAccessJITStubRoutine* stub) + { + auto iter = m_stubs.find(stub); + if (iter != m_stubs.end()) + m_stubs.remove(iter); + } + + PolymorphicAccessJITStubRoutine* find(const Searcher& searcher) + { + auto entry = m_stubs.find(searcher); + if (entry != m_stubs.end()) + return entry->m_wrapped; + return nullptr; + } + + RefPtr getMegamorphic(AccessType) const; + void setMegamorphic(AccessType, Ref); + + RefPtr getSlowPathHandler(AccessType) const; + void setSlowPathHandler(AccessType, Ref); + +private: + HashSet m_stubs; + + RefPtr m_getByValMegamorphic; + RefPtr m_getByValWithThisMegamorphic; + RefPtr m_putByValMegamorphic; + RefPtr m_inByValMegamorphic; + std::array, numberOfAccessTypes> m_fallbackHandlers { }; + std::array, numberOfAccessTypes> m_slowPathHandlers { }; +}; + #else class StructureStubInfo; diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp index 50c0ea76..cd2faf37 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2021 Apple Inc. All Rights Reserved. + * Copyright (C) 2012-2024 Apple Inc. All Rights Reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -60,7 +60,6 @@ UnlinkedCodeBlock::UnlinkedCodeBlock(VM& vm, Structure* structure, CodeType code , m_derivedContextType(static_cast(info.derivedContextType())) , m_evalContextType(static_cast(info.evalContextType())) , m_codeType(static_cast(codeType)) - , m_didOptimize(static_cast(TriState::Indeterminate)) , m_age(0) , m_hasCheckpoints(false) , m_parseMode(info.parseMode()) @@ -69,7 +68,6 @@ UnlinkedCodeBlock::UnlinkedCodeBlock(VM& vm, Structure* structure, CodeType code { ASSERT(m_constructorKind == static_cast(info.constructorKind())); ASSERT(m_codeType == static_cast(codeType)); - ASSERT(m_didOptimize == static_cast(TriState::Indeterminate)); if (info.needsClassFieldInitializer() == NeedsClassFieldInitializer::Yes) { Locker locker { cellLock() }; createRareDataIfNecessary(locker); @@ -114,6 +112,7 @@ void UnlinkedCodeBlock::visitChildrenImpl(JSCell* cell, Visitor& visitor) if (thisObject->hasRareData()) extraMemory += thisObject->m_rareData->sizeInBytes(locker); + extraMemory += thisObject->m_expressionInfo->byteSize(); extraMemory += thisObject->m_jumpTargets.byteSize(); extraMemory += thisObject->m_identifiers.byteSize(); extraMemory += thisObject->m_constantRegisters.byteSize(); @@ -141,7 +140,6 @@ size_t UnlinkedCodeBlock::RareData::sizeInBytes(const AbstractLocker&) const size += m_exceptionHandlers.byteSize(); size += m_unlinkedSwitchJumpTables.byteSize(); size += m_unlinkedStringSwitchJumpTables.byteSize(); - size += m_expressionInfoFatPositions.byteSize(); size += m_typeProfilerInfoMap.capacity() * sizeof(decltype(m_typeProfilerInfoMap)::KeyValuePairType); size += m_opProfileControlFlowBytecodeOffsets.byteSize(); size += m_bitVectors.byteSize(); @@ -152,40 +150,18 @@ size_t UnlinkedCodeBlock::RareData::sizeInBytes(const AbstractLocker&) const return size; } -int UnlinkedCodeBlock::lineNumberForBytecodeIndex(BytecodeIndex bytecodeIndex) +LineColumn UnlinkedCodeBlock::lineColumnForBytecodeIndex(BytecodeIndex bytecodeIndex) { - ASSERT(bytecodeIndex.offset() < instructions().size()); - int divot { 0 }; - int startOffset { 0 }; - int endOffset { 0 }; - unsigned line { 0 }; - unsigned column { 0 }; - expressionRangeForBytecodeIndex(bytecodeIndex, divot, startOffset, endOffset, line, column); - return line; + return m_expressionInfo->lineColumnForInstPC(bytecodeIndex.offset()); } -inline void UnlinkedCodeBlock::getLineAndColumn(const ExpressionRangeInfo& info, - unsigned& line, unsigned& column) const +ExpressionInfo::Entry UnlinkedCodeBlock::expressionInfoForBytecodeIndex(BytecodeIndex bytecodeIndex) { - switch (info.mode) { - case ExpressionRangeInfo::FatLineMode: - info.decodeFatLineMode(line, column); - break; - case ExpressionRangeInfo::FatColumnMode: - info.decodeFatColumnMode(line, column); - break; - case ExpressionRangeInfo::FatLineAndColumnMode: { - unsigned fatIndex = info.position; - ExpressionRangeInfo::FatPosition& fatPos = m_rareData->m_expressionInfoFatPositions[fatIndex]; - line = fatPos.line; - column = fatPos.column; - break; - } - } // switch + return m_expressionInfo->entryForInstPC(bytecodeIndex.offset()); } #ifndef NDEBUG -static void dumpLineColumnEntry(size_t index, const JSInstructionStream& instructionStream, unsigned instructionOffset, unsigned line, unsigned column) +static void dumpExpressionInfoDetails(size_t index, const JSInstructionStream& instructionStream, unsigned instructionOffset, LineColumn lineColumn, 
unsigned divot, unsigned startOffset, unsigned endOffset) { const auto instruction = instructionStream.at(instructionOffset); const char* event = ""; @@ -200,62 +176,23 @@ static void dumpLineColumnEntry(size_t index, const JSInstructionStream& instruc case WillExecuteExpression: event = " WillExecuteExpression"; break; } } - dataLogF(" [%zu] pc %u @ line %u col %u : %s%s\n", index, instructionOffset, line, column, instruction->name(), event); + dataLogF(" [%zu] pc %u @ line %u col %u divot %u startOffset %u endOffset %u : %s%s\n", index, instructionOffset, lineColumn.line, lineColumn.column, divot, startOffset, endOffset, instruction->name(), event); } -void UnlinkedCodeBlock::dumpExpressionRangeInfo() +void UnlinkedCodeBlock::dumpExpressionInfo() { - FixedVector& expressionInfo = m_expressionInfo; - - size_t size = m_expressionInfo.size(); - dataLogF("UnlinkedCodeBlock %p expressionRangeInfo[%zu] {\n", this, size); - for (size_t i = 0; i < size; i++) { - ExpressionRangeInfo& info = expressionInfo[i]; - unsigned line; - unsigned column; - getLineAndColumn(info, line, column); - dumpLineColumnEntry(i, instructions(), info.instructionOffset, line, column); + size_t index = 0; + dataLogF("UnlinkedCodeBlock %p expressionInfo[] {\n", this); + + ExpressionInfo::Decoder decoder(*m_expressionInfo); + while (decoder.decode() != IterationStatus::Done) { + dumpExpressionInfoDetails(index, instructions(), decoder.instPC(), decoder.lineColumn(), decoder.divot(), decoder.startOffset(), decoder.endOffset()); + index++; } dataLog("}\n"); } #endif -void UnlinkedCodeBlock::expressionRangeForBytecodeIndex(BytecodeIndex bytecodeIndex, - int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const -{ - ASSERT(bytecodeIndex.offset() < instructions().size()); - - if (!m_expressionInfo.size()) { - startOffset = 0; - endOffset = 0; - divot = 0; - line = 0; - column = 0; - return; - } - - const FixedVector& expressionInfo = m_expressionInfo; - - int low = 0; - int high = expressionInfo.size(); - while (low < high) { - int mid = low + (high - low) / 2; - if (expressionInfo[mid].instructionOffset <= bytecodeIndex.offset()) - low = mid + 1; - else - high = mid; - } - - if (!low) - low = 1; - - const ExpressionRangeInfo& info = expressionInfo[low - 1]; - startOffset = info.startOffset; - endOffset = info.endOffset; - divot = info.divotPoint; - getLineAndColumn(info, line, column); -} - bool UnlinkedCodeBlock::typeProfilerExpressionInfoForBytecodeOffset(unsigned bytecodeOffset, unsigned& startDivot, unsigned& endDivot) { static constexpr bool verbose = false; diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h b/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h index 3f7334c8..606a6ee4 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2023 Apple Inc. All Rights Reserved. + * Copyright (C) 2012-2024 Apple Inc. All Rights Reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,7 +31,7 @@ #include "CodeType.h" #include "DFGExitProfile.h" #include "ExecutionCounter.h" -#include "ExpressionRangeInfo.h" +#include "ExpressionInfo.h" #include "HandlerInfo.h" #include "Identifier.h" #include "InstructionStream.h" @@ -156,8 +156,7 @@ class UnlinkedCodeBlock : public JSCell { bool allowDirectEvalCache() const { return !(m_features & NoEvalCacheFeature); } bool usesImportMeta() const { return m_features & ImportMetaFeature; } - bool hasExpressionInfo() { return m_expressionInfo.size(); } - const FixedVector& expressionInfo() { return m_expressionInfo; } + bool hasExpressionInfo() { return !m_expressionInfo->isEmpty(); } bool hasCheckpoints() const { return m_hasCheckpoints; } void setHasCheckpoints() { m_hasCheckpoints = true; } @@ -250,10 +249,8 @@ class UnlinkedCodeBlock : public JSCell { bool hasRareData() const { return m_rareData.get(); } - int lineNumberForBytecodeIndex(BytecodeIndex); - - void expressionRangeForBytecodeIndex(BytecodeIndex, int& divot, - int& startOffset, int& endOffset, unsigned& line, unsigned& column) const; + ExpressionInfo::Entry expressionInfoForBytecodeIndex(BytecodeIndex); + LineColumn lineColumnForBytecodeIndex(BytecodeIndex); bool typeProfilerExpressionInfoForBytecodeOffset(unsigned bytecodeOffset, unsigned& startDivot, unsigned& endDivot); @@ -289,15 +286,15 @@ class UnlinkedCodeBlock : public JSCell { return m_rareData && !m_rareData->m_opProfileControlFlowBytecodeOffsets.isEmpty(); } - void dumpExpressionRangeInfo(); // For debugging purpose only. + void dumpExpressionInfo(); // For debugging purpose only. bool wasCompiledWithDebuggingOpcodes() const { return m_codeGenerationMode.contains(CodeGenerationMode::Debugger); } bool wasCompiledWithTypeProfilerOpcodes() const { return m_codeGenerationMode.contains(CodeGenerationMode::TypeProfiler); } bool wasCompiledWithControlFlowProfilerOpcodes() const { return m_codeGenerationMode.contains(CodeGenerationMode::ControlFlowProfiler); } OptionSet codeGenerationMode() const { return m_codeGenerationMode; } - TriState didOptimize() const { return static_cast(m_didOptimize); } - void setDidOptimize(TriState didOptimize) { m_didOptimize = static_cast(didOptimize); } + TriState didOptimize() const { return m_metadata->didOptimize(); } + void setDidOptimize(TriState didOptimize) { m_metadata->setDidOptimize(didOptimize); } static constexpr unsigned maxAge = 7; @@ -391,10 +388,8 @@ class UnlinkedCodeBlock : public JSCell { m_rareData = makeUnique(); } - void getLineAndColumn(const ExpressionRangeInfo&, unsigned& line, unsigned& column) const; BytecodeLivenessAnalysis& livenessAnalysisSlow(CodeBlock*); - VirtualRegister m_thisRegister; VirtualRegister m_scopeRegister; @@ -414,7 +409,6 @@ class UnlinkedCodeBlock : public JSCell { unsigned m_derivedContextType : 2; unsigned m_evalContextType : 2; unsigned m_codeType : 2; - unsigned m_didOptimize : 2; unsigned m_age : 3; static_assert(((1U << 3) - 1) >= maxAge); bool m_hasCheckpoints : 1; @@ -464,8 +458,6 @@ class UnlinkedCodeBlock : public JSCell { FixedVector m_unlinkedSwitchJumpTables; FixedVector m_unlinkedStringSwitchJumpTables; - FixedVector m_expressionInfoFatPositions; - struct TypeProfilerExpressionRange { unsigned m_startDivot; unsigned m_endDivot; @@ -500,7 +492,7 @@ class UnlinkedCodeBlock : public JSCell { OutOfLineJumpTargets m_outOfLineJumpTargets; std::unique_ptr m_rareData; - FixedVector 
m_expressionInfo; + MallocPtr m_expressionInfo; BaselineExecutionCounter m_llintExecuteCounter; FixedVector m_valueProfiles; FixedVector m_arrayProfiles; diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedCodeBlockGenerator.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedCodeBlockGenerator.cpp index 0b4c7354..57493c86 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedCodeBlockGenerator.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedCodeBlockGenerator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2019-2021 Apple Inc. All Rights Reserved. + * Copyright (C) 2019-2024 Apple Inc. All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -27,82 +27,20 @@ #include "UnlinkedCodeBlockGenerator.h" #include "BytecodeRewriter.h" +#include "ExpressionInfoInlines.h" #include "InstructionStream.h" #include "JSCJSValueInlines.h" #include "PreciseJumpTargets.h" #include "UnlinkedMetadataTableInlines.h" +#include namespace JSC { -inline void UnlinkedCodeBlockGenerator::getLineAndColumn(const ExpressionRangeInfo& info, unsigned& line, unsigned& column) const -{ - switch (info.mode) { - case ExpressionRangeInfo::FatLineMode: - info.decodeFatLineMode(line, column); - break; - case ExpressionRangeInfo::FatColumnMode: - info.decodeFatColumnMode(line, column); - break; - case ExpressionRangeInfo::FatLineAndColumnMode: { - unsigned fatIndex = info.position; - const ExpressionRangeInfo::FatPosition& fatPos = m_expressionInfoFatPositions[fatIndex]; - line = fatPos.line; - column = fatPos.column; - break; - } - } // switch -} +WTF_MAKE_TZONE_ALLOCATED_IMPL(UnlinkedCodeBlockGenerator); -void UnlinkedCodeBlockGenerator::addExpressionInfo(unsigned instructionOffset, int divot, int startOffset, int endOffset, unsigned line, unsigned column) +void UnlinkedCodeBlockGenerator::addExpressionInfo(unsigned instructionOffset, unsigned divot, unsigned startOffset, unsigned endOffset, LineColumn lineColumn) { - if (divot > ExpressionRangeInfo::MaxDivot) { - // Overflow has occurred, we can only give line number info for errors for this region - divot = 0; - startOffset = 0; - endOffset = 0; - } else if (startOffset > ExpressionRangeInfo::MaxOffset) { - // If the start offset is out of bounds we clear both offsets - // so we only get the divot marker. Error message will have to be reduced - // to line and charPosition number. - startOffset = 0; - endOffset = 0; - } else if (endOffset > ExpressionRangeInfo::MaxOffset) { - // The end offset is only used for additional context, and is much more likely - // to overflow (eg. function call arguments) so we are willing to drop it without - // dropping the rest of the range. - endOffset = 0; - } - - unsigned positionMode = - (line <= ExpressionRangeInfo::MaxFatLineModeLine && column <= ExpressionRangeInfo::MaxFatLineModeColumn) - ? ExpressionRangeInfo::FatLineMode - : (line <= ExpressionRangeInfo::MaxFatColumnModeLine && column <= ExpressionRangeInfo::MaxFatColumnModeColumn) - ? 
ExpressionRangeInfo::FatColumnMode - : ExpressionRangeInfo::FatLineAndColumnMode; - - ExpressionRangeInfo info; - info.instructionOffset = instructionOffset; - info.divotPoint = divot; - info.startOffset = startOffset; - info.endOffset = endOffset; - - info.mode = positionMode; - switch (positionMode) { - case ExpressionRangeInfo::FatLineMode: - info.encodeFatLineMode(line, column); - break; - case ExpressionRangeInfo::FatColumnMode: - info.encodeFatColumnMode(line, column); - break; - case ExpressionRangeInfo::FatLineAndColumnMode: { - unsigned fatIndex = m_expressionInfoFatPositions.size(); - ExpressionRangeInfo::FatPosition fatPos = { line, column }; - m_expressionInfoFatPositions.append(fatPos); - info.position = fatIndex; - } - } // switch - - m_expressionInfo.append(info); + m_expressionInfoEncoder.encode(instructionOffset, divot, startOffset, endOffset, lineColumn); } void UnlinkedCodeBlockGenerator::addTypeProfilerExpressionInfo(unsigned instructionOffset, unsigned startDivot, unsigned endDivot) @@ -128,14 +66,14 @@ void UnlinkedCodeBlockGenerator::finalize(std::unique_ptr i m_codeBlock->m_constantsSourceCodeRepresentation = WTFMove(m_constantsSourceCodeRepresentation); m_codeBlock->m_functionDecls = WTFMove(m_functionDecls); m_codeBlock->m_functionExprs = WTFMove(m_functionExprs); - m_codeBlock->m_expressionInfo = WTFMove(m_expressionInfo); + m_codeBlock->m_expressionInfo = m_expressionInfoEncoder.createExpressionInfo(); + m_codeBlock->m_outOfLineJumpTargets = WTFMove(m_outOfLineJumpTargets); if (!m_codeBlock->m_rareData) { if (!m_exceptionHandlers.isEmpty() || !m_unlinkedSwitchJumpTables.isEmpty() || !m_unlinkedStringSwitchJumpTables.isEmpty() - || !m_expressionInfoFatPositions.isEmpty() || !m_typeProfilerInfoMap.isEmpty() || !m_opProfileControlFlowBytecodeOffsets.isEmpty() || !m_bitVectors.isEmpty() @@ -146,7 +84,6 @@ void UnlinkedCodeBlockGenerator::finalize(std::unique_ptr i m_codeBlock->m_rareData->m_exceptionHandlers = WTFMove(m_exceptionHandlers); m_codeBlock->m_rareData->m_unlinkedSwitchJumpTables = WTFMove(m_unlinkedSwitchJumpTables); m_codeBlock->m_rareData->m_unlinkedStringSwitchJumpTables = WTFMove(m_unlinkedStringSwitchJumpTables); - m_codeBlock->m_rareData->m_expressionInfoFatPositions = WTFMove(m_expressionInfoFatPositions); m_codeBlock->m_rareData->m_typeProfilerInfoMap = WTFMove(m_typeProfilerInfoMap); m_codeBlock->m_rareData->m_opProfileControlFlowBytecodeOffsets = WTFMove(m_opProfileControlFlowBytecodeOffsets); m_codeBlock->m_rareData->m_bitVectors = WTFMove(m_bitVectors); @@ -157,7 +94,7 @@ void UnlinkedCodeBlockGenerator::finalize(std::unique_ptr i m_codeBlock->initializeLoopHintExecutionCounter(); } m_vm.writeBarrier(m_codeBlock.get()); - m_vm.heap.reportExtraMemoryAllocated(m_codeBlock->m_instructions->sizeInBytes() + m_codeBlock->metadataSizeInBytes()); + m_vm.heap.reportExtraMemoryAllocated(m_codeBlock.get(), m_codeBlock->m_instructions->sizeInBytes() + m_codeBlock->metadataSizeInBytes()); } UnlinkedHandlerInfo* UnlinkedCodeBlockGenerator::handlerForBytecodeIndex(BytecodeIndex bytecodeIndex, RequiredHandler requiredHandler) @@ -194,8 +131,13 @@ void UnlinkedCodeBlockGenerator::applyModification(BytecodeRewriter& rewriter, J m_typeProfilerInfoMap.swap(adjustedTypeProfilerInfoMap); } - for (size_t i = 0; i < m_expressionInfo.size(); ++i) - m_expressionInfo[i].instructionOffset = rewriter.adjustAbsoluteOffset(m_expressionInfo[i].instructionOffset); + Vector bytecodeOffsetAdjustments; + rewriter.forEachLabelPoint([&] (int32_t bytecodeOffset) { + 
bytecodeOffsetAdjustments.append(bytecodeOffset); + }); + m_expressionInfoEncoder.remap(WTFMove(bytecodeOffsetAdjustments), [&] (int32_t bytecodeOffset) { + return rewriter.adjustAbsoluteOffset(bytecodeOffset); + }); // Then, modify the unlinked instructions. rewriter.applyModification(); diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedCodeBlockGenerator.h b/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedCodeBlockGenerator.h index 184235dc..49d68dde 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedCodeBlockGenerator.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedCodeBlockGenerator.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2019-2021 Apple Inc. All rights reserved. + * Copyright (C) 2019-2024 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -27,6 +27,7 @@ #pragma once #include "UnlinkedCodeBlock.h" +#include #include namespace JSC { @@ -34,7 +35,7 @@ namespace JSC { // FIXME: Create UnlinkedCodeBlock inside UnlinkedCodeBlockGenerator. // https://bugs.webkit.org/show_bug.cgi?id=207212 class UnlinkedCodeBlockGenerator { - WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_TZONE_ALLOCATED(UnlinkedCodeBlockGenerator); WTF_MAKE_NONCOPYABLE(UnlinkedCodeBlockGenerator) public: UnlinkedCodeBlockGenerator(VM& vm, UnlinkedCodeBlock* codeBlock) @@ -76,7 +77,7 @@ class UnlinkedCodeBlockGenerator { void setNumParameters(unsigned newValue) { m_codeBlock->setNumParameters(newValue); } UnlinkedMetadataTable& metadata() { return m_codeBlock->metadata(); } - void addExpressionInfo(unsigned instructionOffset, int divot, int startOffset, int endOffset, unsigned line, unsigned column); + void addExpressionInfo(unsigned instructionOffset, unsigned divot, unsigned startOffset, unsigned endOffset, LineColumn); void addTypeProfilerExpressionInfo(unsigned instructionOffset, unsigned startDivot, unsigned endDivot); void addOpProfileControlFlowBytecodeOffset(JSInstructionStream::Offset offset) { @@ -190,8 +191,6 @@ class UnlinkedCodeBlockGenerator { size_t metadataSizeInBytes() { return m_codeBlock->metadataSizeInBytes(); } - void getLineAndColumn(const ExpressionRangeInfo&, unsigned& line, unsigned& column) const; - void applyModification(BytecodeRewriter&, JSInstructionStreamWriter&); void finalize(std::unique_ptr); @@ -211,13 +210,12 @@ class UnlinkedCodeBlockGenerator { Vector m_constantsSourceCodeRepresentation; Vector> m_functionDecls; Vector> m_functionExprs; - Vector m_expressionInfo; + ExpressionInfo::Encoder m_expressionInfoEncoder; OutOfLineJumpTargets m_outOfLineJumpTargets; // In RareData. Vector m_exceptionHandlers; Vector m_unlinkedSwitchJumpTables; Vector m_unlinkedStringSwitchJumpTables; - Vector m_expressionInfoFatPositions; HashMap m_typeProfilerInfoMap; Vector m_opProfileControlFlowBytecodeOffsets; Vector m_bitVectors; diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp b/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp index 972091c8..3ab478c0 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp @@ -55,9 +55,9 @@ static UnlinkedFunctionCodeBlock* generateUnlinkedFunctionCodeBlock( JSParserStrictMode strictMode = executable->isInStrictContext() ? 
JSParserStrictMode::Strict : JSParserStrictMode::NotStrict; JSParserScriptMode scriptMode = executable->scriptMode(); ASSERT(isFunctionParseMode(executable->parseMode())); - auto* classFieldLocations = executable->classFieldLocations(); + auto* classElementDefinitions = executable->classElementDefinitions(); std::unique_ptr function = parse( - vm, source, executable->name(), executable->implementationVisibility(), builtinMode, strictMode, scriptMode, executable->parseMode(), executable->superBinding(), error, nullptr, ConstructorKind::None, DerivedContextType::None, EvalContextType::None, nullptr, nullptr, classFieldLocations); + vm, source, executable->name(), executable->implementationVisibility(), builtinMode, strictMode, scriptMode, executable->parseMode(), executable->functionMode(), executable->superBinding(), error, nullptr, ConstructorKind::None, DerivedContextType::None, EvalContextType::None, nullptr, nullptr, classElementDefinitions); if (!function) { ASSERT(error.isValid()); @@ -72,8 +72,9 @@ static UnlinkedFunctionCodeBlock* generateUnlinkedFunctionCodeBlock( UnlinkedFunctionCodeBlock* result = UnlinkedFunctionCodeBlock::create(vm, FunctionCode, ExecutableInfo(kind == CodeForConstruct, executable->privateBrandRequirement(), functionKind == UnlinkedBuiltinFunction, executable->constructorKind(), scriptMode, executable->superBinding(), parseMode, executable->derivedContextType(), executable->needsClassFieldInitializer(), false, isClassContext, EvalContextType::FunctionEvalContext), codeGenerationMode); auto parentScopeTDZVariables = executable->parentScopeTDZVariables(); + const FixedVector* generatorOrAsyncWrapperFunctionParameterNames = executable->generatorOrAsyncWrapperFunctionParameterNames(); const PrivateNameEnvironment* parentPrivateNameEnvironment = executable->parentPrivateNameEnvironment(); - error = BytecodeGenerator::generate(vm, function.get(), source, result, codeGenerationMode, parentScopeTDZVariables, parentPrivateNameEnvironment); + error = BytecodeGenerator::generate(vm, function.get(), source, result, codeGenerationMode, parentScopeTDZVariables, generatorOrAsyncWrapperFunctionParameterNames, parentPrivateNameEnvironment); if (error.isValid()) return nullptr; @@ -81,7 +82,7 @@ static UnlinkedFunctionCodeBlock* generateUnlinkedFunctionCodeBlock( return result; } -UnlinkedFunctionExecutable::UnlinkedFunctionExecutable(VM& vm, Structure* structure, const SourceCode& parentSource, FunctionMetadataNode* node, UnlinkedFunctionKind kind, ConstructAbility constructAbility, JSParserScriptMode scriptMode, RefPtr parentScopeTDZVariables, std::optional parentPrivateNameEnvironment, DerivedContextType derivedContextType, NeedsClassFieldInitializer needsClassFieldInitializer, PrivateBrandRequirement privateBrandRequirement, bool isBuiltinDefaultClassConstructor) +UnlinkedFunctionExecutable::UnlinkedFunctionExecutable(VM& vm, Structure* structure, const SourceCode& parentSource, FunctionMetadataNode* node, UnlinkedFunctionKind kind, ConstructAbility constructAbility, InlineAttribute inlineAttribute, JSParserScriptMode scriptMode, RefPtr parentScopeTDZVariables, std::optional>&& generatorOrAsyncWrapperFunctionParameterNames, std::optional parentPrivateNameEnvironment, DerivedContextType derivedContextType, NeedsClassFieldInitializer needsClassFieldInitializer, PrivateBrandRequirement privateBrandRequirement, bool isBuiltinDefaultClassConstructor) : Base(vm, structure) , m_firstLineOffset(node->firstLine() - parentSource.firstLine().oneBasedInt()) , 
m_isGeneratedFromCache(false) @@ -110,6 +111,7 @@ UnlinkedFunctionExecutable::UnlinkedFunctionExecutable(VM& vm, Structure* struct , m_lexicalScopeFeatures(node->lexicalScopeFeatures()) , m_functionMode(static_cast(node->functionMode())) , m_derivedContextType(static_cast(derivedContextType)) + , m_inlineAttribute(static_cast(inlineAttribute)) , m_unlinkedCodeBlockForCall() , m_unlinkedCodeBlockForConstruct() , m_name(node->ident()) @@ -123,6 +125,7 @@ UnlinkedFunctionExecutable::UnlinkedFunctionExecutable(VM& vm, Structure* struct ASSERT(m_scriptMode == static_cast(scriptMode)); ASSERT(m_superBinding == static_cast(node->superBinding())); ASSERT(m_derivedContextType == static_cast(derivedContextType)); + ASSERT(m_inlineAttribute == static_cast(inlineAttribute)); ASSERT(m_privateBrandRequirement == static_cast(privateBrandRequirement)); ASSERT(!(m_isBuiltinDefaultClassConstructor && constructorKind() == ConstructorKind::None)); ASSERT(!m_needsClassFieldInitializer || (isClassConstructorFunction() || derivedContextType == DerivedContextType::DerivedConstructorContext)); @@ -130,6 +133,8 @@ UnlinkedFunctionExecutable::UnlinkedFunctionExecutable(VM& vm, Structure* struct setClassSource(node->classSource()); if (parentScopeTDZVariables) ensureRareData().m_parentScopeTDZVariables = WTFMove(parentScopeTDZVariables); + if (generatorOrAsyncWrapperFunctionParameterNames) + ensureRareData().m_generatorOrAsyncWrapperFunctionParameterNames = FixedVector(WTFMove(generatorOrAsyncWrapperFunctionParameterNames.value())); if (parentPrivateNameEnvironment) ensureRareData().m_parentPrivateNameEnvironment = WTFMove(*parentPrivateNameEnvironment); } diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h b/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h index 1dde9691..36bd562d 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h @@ -29,9 +29,9 @@ #include "ConstructAbility.h" #include "ConstructorKind.h" #include "ExecutableInfo.h" -#include "ExpressionRangeInfo.h" #include "Identifier.h" #include "ImplementationVisibility.h" +#include "InlineAttribute.h" #include "Intrinsic.h" #include "JSCast.h" #include "ParserModes.h" @@ -72,10 +72,10 @@ class UnlinkedFunctionExecutable final : public JSCell { return &vm.unlinkedFunctionExecutableSpace(); } - static UnlinkedFunctionExecutable* create(VM& vm, const SourceCode& source, FunctionMetadataNode* node, UnlinkedFunctionKind unlinkedFunctionKind, ConstructAbility constructAbility, JSParserScriptMode scriptMode, RefPtr parentScopeTDZVariables, std::optional parentPrivateNameEnvironment, DerivedContextType derivedContextType, NeedsClassFieldInitializer needsClassFieldInitializer, PrivateBrandRequirement privateBrandRequirement, bool isBuiltinDefaultClassConstructor = false) + static UnlinkedFunctionExecutable* create(VM& vm, const SourceCode& source, FunctionMetadataNode* node, UnlinkedFunctionKind unlinkedFunctionKind, ConstructAbility constructAbility, InlineAttribute inlineAttribute, JSParserScriptMode scriptMode, RefPtr parentScopeTDZVariables, std::optional>&& generatorOrAsyncWrapperFunctionParameterNames, std::optional parentPrivateNameEnvironment, DerivedContextType derivedContextType, NeedsClassFieldInitializer needsClassFieldInitializer, PrivateBrandRequirement privateBrandRequirement, bool isBuiltinDefaultClassConstructor = false) { UnlinkedFunctionExecutable* instance = new (NotNull, 
allocateCell(vm)) - UnlinkedFunctionExecutable(vm, vm.unlinkedFunctionExecutableStructure.get(), source, node, unlinkedFunctionKind, constructAbility, scriptMode, WTFMove(parentScopeTDZVariables), WTFMove(parentPrivateNameEnvironment), derivedContextType, needsClassFieldInitializer, privateBrandRequirement, isBuiltinDefaultClassConstructor); + UnlinkedFunctionExecutable(vm, vm.unlinkedFunctionExecutableStructure.get(), source, node, unlinkedFunctionKind, constructAbility, inlineAttribute, scriptMode, WTFMove(parentScopeTDZVariables), WTFMove(generatorOrAsyncWrapperFunctionParameterNames), WTFMove(parentPrivateNameEnvironment), derivedContextType, needsClassFieldInitializer, privateBrandRequirement, isBuiltinDefaultClassConstructor); instance->finishCreation(vm); return instance; } @@ -181,6 +181,13 @@ class UnlinkedFunctionExecutable final : public JSCell { return m_rareData->m_parentScopeTDZVariables; } + const FixedVector* generatorOrAsyncWrapperFunctionParameterNames() const + { + if (!m_rareData) + return nullptr; + return &m_rareData->m_generatorOrAsyncWrapperFunctionParameterNames; + } + const PrivateNameEnvironment* parentPrivateNameEnvironment() const { if (!m_rareData) @@ -192,6 +199,8 @@ class UnlinkedFunctionExecutable final : public JSCell { JSC::DerivedContextType derivedContextType() const {return static_cast(m_derivedContextType); } + InlineAttribute inlineAttribute() const { return static_cast(m_inlineAttribute); } + String sourceURLDirective() const { if (m_rareData) @@ -215,6 +224,22 @@ class UnlinkedFunctionExecutable final : public JSCell { void finalizeUnconditionally(VM&, CollectionScope); + struct ClassElementDefinition { + WTF_MAKE_STRUCT_FAST_ALLOCATED; + + enum class Kind : uint8_t { + FieldWithLiteralPropertyKey = 0, + FieldWithComputedPropertyKey = 1, + FieldWithPrivatePropertyKey = 2, + StaticInitializationBlock = 3, + }; + + Identifier ident { }; + JSTextPosition position { }; + std::optional initializerPosition { std::nullopt }; + Kind kind { Kind::FieldWithLiteralPropertyKey }; + }; + struct RareData { WTF_MAKE_STRUCT_FAST_ALLOCATED; @@ -222,28 +247,29 @@ class UnlinkedFunctionExecutable final : public JSCell { String m_sourceURLDirective; String m_sourceMappingURLDirective; RefPtr m_parentScopeTDZVariables; - FixedVector m_classFieldLocations; + FixedVector m_generatorOrAsyncWrapperFunctionParameterNames; + FixedVector m_classElementDefinitions; PrivateNameEnvironment m_parentPrivateNameEnvironment; }; NeedsClassFieldInitializer needsClassFieldInitializer() const { return static_cast(m_needsClassFieldInitializer); } - const FixedVector* classFieldLocations() const + const FixedVector* classElementDefinitions() const { if (m_rareData) - return &m_rareData->m_classFieldLocations; + return &m_rareData->m_classElementDefinitions; return nullptr; } - void setClassFieldLocations(Vector&& classFieldLocations) + void setClassElementDefinitions(Vector&& classElementDefinitions) { - if (classFieldLocations.isEmpty()) + if (classElementDefinitions.isEmpty()) return; - ensureRareData().m_classFieldLocations = FixedVector(WTFMove(classFieldLocations)); + ensureRareData().m_classElementDefinitions = FixedVector(WTFMove(classElementDefinitions)); } private: - UnlinkedFunctionExecutable(VM&, Structure*, const SourceCode&, FunctionMetadataNode*, UnlinkedFunctionKind, ConstructAbility, JSParserScriptMode, RefPtr, std::optional, JSC::DerivedContextType, JSC::NeedsClassFieldInitializer, PrivateBrandRequirement, bool isBuiltinDefaultClassConstructor); + 
UnlinkedFunctionExecutable(VM&, Structure*, const SourceCode&, FunctionMetadataNode*, UnlinkedFunctionKind, ConstructAbility, InlineAttribute, JSParserScriptMode, RefPtr, std::optional>&&, std::optional, JSC::DerivedContextType, JSC::NeedsClassFieldInitializer, PrivateBrandRequirement, bool isBuiltinDefaultClassConstructor); UnlinkedFunctionExecutable(Decoder&, const CachedFunctionExecutable&); DECLARE_VISIT_CHILDREN; @@ -284,6 +310,7 @@ class UnlinkedFunctionExecutable final : public JSCell { uint8_t m_lexicalScopeFeatures : bitWidthOfLexicalScopeFeatures; uint8_t m_functionMode : 2; // FunctionMode uint8_t m_derivedContextType : 2; + uint8_t m_inlineAttribute : 1; union { WriteBarrier m_unlinkedCodeBlockForCall; diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedMetadataTable.h b/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedMetadataTable.h index 9f1e5e1f..965f1e84 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedMetadataTable.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/UnlinkedMetadataTable.h @@ -96,6 +96,9 @@ class UnlinkedMetadataTable : public RefCounted { unsigned numValueProfiles() const { return m_numValueProfiles; } + TriState didOptimize() const { return m_didOptimize; } + void setDidOptimize(TriState didOptimize) { m_didOptimize = didOptimize; } + private: enum EmptyTag { Empty }; @@ -164,6 +167,7 @@ class UnlinkedMetadataTable : public RefCounted { bool m_isFinalized : 1; bool m_isLinked : 1; bool m_is32Bit : 1; + TriState m_didOptimize : 2 { TriState::Indeterminate }; unsigned m_numValueProfiles { 0 }; uint8_t* m_rawBuffer; }; diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/ValueProfile.h b/vendor/webkit/Source/JavaScriptCore/bytecode/ValueProfile.h index ae4fa4bb..579ea88a 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/ValueProfile.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/ValueProfile.h @@ -32,6 +32,7 @@ #include "SpeculatedType.h" #include "Structure.h" #include "VirtualRegister.h" +#include #include #include @@ -61,12 +62,12 @@ struct ValueProfileBase { void clearBuckets() { for (unsigned i = 0; i < totalNumberOfBuckets; ++i) - m_buckets[i] = JSValue::encode(JSValue()); + clearEncodedJSValueConcurrent(m_buckets[i]); } const ClassInfo* classInfo(unsigned bucket) const { - JSValue value = JSValue::decode(m_buckets[bucket]); + JSValue value = JSValue::decodeConcurrent(&m_buckets[bucket]); if (!!value) { if (!value.isCell()) return nullptr; @@ -79,7 +80,7 @@ struct ValueProfileBase { { unsigned result = 0; for (unsigned i = 0; i < totalNumberOfBuckets; ++i) { - if (!!JSValue::decode(m_buckets[i])) + if (!!JSValue::decodeConcurrent(&m_buckets[i])) result++; } return result; @@ -95,7 +96,7 @@ struct ValueProfileBase { bool isLive() const { for (unsigned i = 0; i < totalNumberOfBuckets; ++i) { - if (!!JSValue::decode(m_buckets[i])) + if (!!JSValue::decodeConcurrent(&m_buckets[i])) return true; } return false; @@ -131,13 +132,13 @@ struct ValueProfileBase { { SpeculatedType merged = SpecNone; for (unsigned i = 0; i < totalNumberOfBuckets; ++i) { - JSValue value = JSValue::decode(m_buckets[i]); + JSValue value = JSValue::decodeConcurrent(&m_buckets[i]); if (!value) continue; mergeSpeculation(merged, speculationFromValue(value)); - - m_buckets[i] = JSValue::encode(JSValue()); + + updateEncodedJSValueConcurrent(m_buckets[i], JSValue::encode(JSValue())); } mergeSpeculation(m_prediction, merged); @@ -205,6 +206,8 @@ class alignas(ValueProfileAndVirtualRegister) ValueProfileAndVirtualRegisterBuff return 
bitwise_cast(this + 1); } + std::span span() { return { data(), size() }; } + private: ValueProfileAndVirtualRegisterBuffer(unsigned size) diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/VirtualRegister.h b/vendor/webkit/Source/JavaScriptCore/bytecode/VirtualRegister.h index be0fdc1f..c7e81681 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/VirtualRegister.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/VirtualRegister.h @@ -78,7 +78,7 @@ class VirtualRegister { int offset() const { return m_virtualRegister; } int offsetInBytes() const { return m_virtualRegister * sizeof(Register); } - friend bool operator==(VirtualRegister, VirtualRegister) = default; + friend bool operator==(const VirtualRegister&, const VirtualRegister&) = default; bool operator<(VirtualRegister other) const { return m_virtualRegister < other.m_virtualRegister; } bool operator>(VirtualRegister other) const { return m_virtualRegister > other.m_virtualRegister; } bool operator<=(VirtualRegister other) const { return m_virtualRegister <= other.m_virtualRegister; } diff --git a/vendor/webkit/Source/JavaScriptCore/bytecode/Watchpoint.h b/vendor/webkit/Source/JavaScriptCore/bytecode/Watchpoint.h index 9e110b76..5302781e 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecode/Watchpoint.h +++ b/vendor/webkit/Source/JavaScriptCore/bytecode/Watchpoint.h @@ -111,7 +111,7 @@ class WatchpointSet; DECLARE_ALLOCATOR_WITH_HEAP_IDENTIFIER(Watchpoint); -class Watchpoint : public PackedRawSentinelNode { +class Watchpoint : public BasicRawSentinelNode { WTF_MAKE_NONCOPYABLE(Watchpoint); WTF_MAKE_NONMOVABLE(Watchpoint); WTF_MAKE_STRUCT_FAST_ALLOCATED_WITH_HEAP_IDENTIFIER(Watchpoint); @@ -274,7 +274,7 @@ class WatchpointSet : public ThreadSafeRefCounted { int8_t m_state; int8_t m_setIsNotEmpty; - SentinelLinkedList> m_set; + SentinelLinkedList> m_set; }; // InlineWatchpointSet is a low-overhead, non-copyable watchpoint set in which diff --git a/vendor/webkit/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp b/vendor/webkit/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp index d8ed9e6d..74a53db1 100644 --- a/vendor/webkit/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp +++ b/vendor/webkit/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008-2021 Apple Inc. All rights reserved. + * Copyright (C) 2008-2023 Apple Inc. All rights reserved. * Copyright (C) 2008 Cameron Zwarich * Copyright (C) 2012 Igalia, S.L. * @@ -59,10 +59,14 @@ #include #include #include +#include #include namespace JSC { +WTF_MAKE_TZONE_ALLOCATED_IMPL(BytecodeGenerator); +WTF_MAKE_TZONE_ALLOCATED_IMPL(ForInContext); + template struct VarArgsOp; @@ -196,14 +200,6 @@ ParserError BytecodeGenerator::generate(unsigned& size) if (m_needToInitializeArguments) initializeVariable(variable(propertyNames().arguments), m_argumentsRegister); - if (m_restParameter) { - // We should move moving m_restParameter->emit(*this) to initializeDefaultParameterValuesAndSetupFunctionScopeStack - // as an optimization if we can prove that change has no side effect. 
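The ValueProfile.h hunk above routes every cross-thread bucket access through decodeConcurrent / updateEncodedJSValueConcurrent / clearEncodedJSValueConcurrent: buckets are written racily by the mutator and read by concurrent compiler threads, so the reads must be tearing-free. A minimal sketch of that discipline, assuming relaxed atomics are the underlying mechanism (this is the idea, not JSC's implementation):

#include <atomic>
#include <cstdint>

using EncodedValue = uint64_t; // stand-in for EncodedJSValue

struct ProfileBucket {
    std::atomic<EncodedValue> word { 0 };

    // Relaxed ordering suffices: the compiler thread only needs a consistent
    // snapshot of one word, not ordering against other memory.
    EncodedValue decodeConcurrent() const { return word.load(std::memory_order_relaxed); }
    void updateConcurrent(EncodedValue v) { word.store(v, std::memory_order_relaxed); }
    void clearConcurrent() { word.store(0, std::memory_order_relaxed); }
};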
- asyncFuncParametersTryCatchWrap([&] { - m_restParameter->emit(*this); - }); - } - { bool shouldHoistInEval = m_codeType == EvalCode && !m_ecmaMode.isStrict(); RefPtr temp = newTemporary(); @@ -331,7 +327,7 @@ ParserError BytecodeGenerator::generate(unsigned& size) } - if (m_isAsync) + if (m_needsGeneratorification) performGeneratorification(*this, m_codeBlock.get(), m_writer, m_generatorFrameSymbolTable.get(), m_generatorFrameSymbolTableIndex); RELEASE_ASSERT(m_codeBlock->numCalleeLocals() < static_cast(FirstConstantRegisterIndex)); @@ -348,7 +344,7 @@ ParserError BytecodeGenerator::generate(unsigned& size) return ParserError(ParserError::ErrorNone); } -BytecodeGenerator::BytecodeGenerator(VM& vm, ProgramNode* programNode, UnlinkedProgramCodeBlock* codeBlock, OptionSet codeGenerationMode, const RefPtr& parentScopeTDZVariables, const PrivateNameEnvironment*) +BytecodeGenerator::BytecodeGenerator(VM& vm, ProgramNode* programNode, UnlinkedProgramCodeBlock* codeBlock, OptionSet codeGenerationMode, const RefPtr& parentScopeTDZVariables, const FixedVector*, const PrivateNameEnvironment*) : BytecodeGeneratorBase(makeUnique(vm, codeBlock), CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()) , m_codeGenerationMode(codeGenerationMode) , m_scopeNode(programNode) @@ -394,7 +390,7 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, ProgramNode* programNode, UnlinkedP } } -BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionNode* functionNode, UnlinkedFunctionCodeBlock* codeBlock, OptionSet codeGenerationMode, const RefPtr& parentScopeTDZVariables, const PrivateNameEnvironment* parentPrivateNameEnvironment) +BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionNode* functionNode, UnlinkedFunctionCodeBlock* codeBlock, OptionSet codeGenerationMode, const RefPtr& parentScopeTDZVariables, const FixedVector* generatorOrAsyncWrapperFunctionParameterNames, const PrivateNameEnvironment* parentPrivateNameEnvironment) : BytecodeGeneratorBase(makeUnique(vm, codeBlock), CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()) , m_codeGenerationMode(codeGenerationMode) , m_scopeNode(functionNode) @@ -428,6 +424,7 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionNode* functionNode, Unlinke int symbolTableConstantIndex = 0; m_cachedParentTDZ = parentScopeTDZVariables; + m_generatorOrAsyncWrapperFunctionParameterNames = generatorOrAsyncWrapperFunctionParameterNames; FunctionParameters& parameters = *functionNode->parameters(); // http://www.ecma-international.org/ecma-262/6.0/index.html#sec-functiondeclarationinstantiation @@ -444,15 +441,13 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionNode* functionNode, Unlinke bool shouldCaptureSomeOfTheThings = shouldEmitDebugHooks() || functionNode->needsActivation() || containsArrowOrEvalButNotInArrowBlock; bool shouldCaptureAllOfTheThings = shouldEmitDebugHooks() || usesEval(); - bool needsArguments = ((functionNode->usesArguments() && !codeBlock->isArrowFunction()) || usesEval() || (functionNode->usesArrowFunction() && !codeBlock->isArrowFunction() && isArgumentsUsedInInnerArrowFunction())) && parseMode != SourceParseMode::ClassFieldInitializerMode; + m_needsArguments = ((functionNode->usesArguments() && !codeBlock->isArrowFunction()) || usesEval() || (functionNode->usesArrowFunction() && !codeBlock->isArrowFunction() && isArgumentsUsedInInnerArrowFunction())) && parseMode != SourceParseMode::ClassFieldInitializerMode; if (isGeneratorOrAsyncFunctionBodyParseMode(parseMode)) { - m_isAsync = true; + m_needsGeneratorification = true; // Generator and 
-        needsArguments = false;
-    }
-
-    if (isGeneratorOrAsyncFunctionWrapperParseMode(parseMode) && needsArguments) {
+        m_needsArguments = false;
+    } else if (isGeneratorOrAsyncFunctionWrapperParseMode(parseMode)) {
         // Generator does not provide "arguments". Instead, wrapping GeneratorFunction provides "arguments".
         // This is because arguments of a generator should be evaluated before starting it.
         // To workaround it, we evaluate these arguments as arguments of a wrapping generator function, and reference it from a generator.
@@ -467,7 +462,12 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionNode* functionNode, Unlinke
         //     }
         //   }
         // }
-        shouldCaptureSomeOfTheThings = true;
+        if (m_needsArguments)
+            shouldCaptureSomeOfTheThings = true;
+        if (parameters.size()) {
+            shouldCaptureSomeOfTheThings = true;
+            shouldCaptureAllOfTheThings = true;
+        }
     }
 
     if (shouldCaptureAllOfTheThings)
@@ -476,7 +476,7 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionNode* functionNode, Unlinke
     auto captures = scopedLambda<bool (UniquedStringImpl*)>([&] (UniquedStringImpl* uid) -> bool {
         if (!shouldCaptureSomeOfTheThings)
             return false;
-        if (needsArguments && uid == propertyNames().arguments.impl()) {
+        if (m_needsArguments && uid == propertyNames().arguments.impl()) {
            // Actually, we only need to capture the arguments object when we "need full activation"
            // because of name scopes. But historically we did it this way, so for now we just preserve
            // the old behavior.
@@ -540,28 +540,12 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionNode* functionNode, Unlinke
     if (isSimpleParameterList)
         initializeVarLexicalEnvironment(symbolTableConstantIndex, functionSymbolTable, shouldCaptureSomeOfTheThings);
 
-    // Figure out some interesting facts about our arguments.
-    bool capturesAnyArgumentByName = false;
-    if (functionNode->hasCapturedVariables()) {
-        FunctionParameters& parameters = *functionNode->parameters();
-        for (size_t i = 0; i < parameters.size(); ++i) {
-            auto pattern = parameters.at(i).first;
-            if (!pattern->isBindingNode())
-                continue;
-            const Identifier& ident = static_cast<const BindingNode*>(pattern)->boundProperty();
-            capturesAnyArgumentByName |= captures(ident.impl());
-        }
-    }
-
-    if (capturesAnyArgumentByName)
-        ASSERT(m_lexicalEnvironmentRegister);
-
     // Need to know what our functions are called. Parameters have some goofy behaviors when it
     // comes to functions of the same name.
     for (FunctionMetadataNode* function : functionNode->functionStack())
         m_functions.add(function->ident().impl());
 
-    if (needsArguments) {
+    if (m_needsArguments) {
         // Create the arguments object now. We may put the arguments object into the activation if
         // it is captured. Either way, we create two arguments object variables: one is our
         // private variable that is immutable, and another that is the user-visible variable. The
@@ -572,12 +556,25 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionNode* functionNode, Unlinke
         m_argumentsRegister->ref();
     }
 
-    if (needsArguments && !ecmaMode.isStrict() && isSimpleParameterList) {
+    if (m_needsArguments && !ecmaMode.isStrict() && isSimpleParameterList) {
         // If we captured any formal parameter by name, then we use ScopedArguments. Otherwise we
         // use DirectArguments. With ScopedArguments, we lift all of our arguments into the
         // activation.
+        bool capturesAnyParameterByName = false;
+        if (functionNode->hasCapturedVariables()) {
+            for (size_t i = 0; i < parameters.size(); ++i) {
+                auto pattern = parameters.at(i).first;
+                ASSERT(pattern->isBindingNode());
+                const Identifier& ident = static_cast<const BindingNode*>(pattern)->boundProperty();
+                if (captures(ident.impl())) {
+                    capturesAnyParameterByName = true;
+                    break;
+                }
+            }
+        }
 
-        if (capturesAnyArgumentByName) {
+        if (capturesAnyParameterByName) {
+            ASSERT(m_lexicalEnvironmentRegister);
             bool success = functionSymbolTable->trySetArgumentsLength(vm, parameters.size());
             if (UNLIKELY(!success)) {
                 m_outOfMemoryDuringConstruction = true;
@@ -649,7 +646,7 @@ IGNORE_GCC_WARNINGS_END
         }
     }
 
-    if (needsArguments && (ecmaMode.isStrict() || !isSimpleParameterList)) {
+    if (m_needsArguments && (ecmaMode.isStrict() || !isSimpleParameterList)) {
         // Allocate a cloned arguments object.
         OpCreateClonedArguments::emit(this, m_argumentsRegister);
     }
@@ -676,7 +673,7 @@ IGNORE_GCC_WARNINGS_END
     // This is our final act of weirdness. "arguments" is overridden by everything except the
     // callee. We add it to the symbol table if it's not already there and it's not an argument.
     bool shouldCreateArgumentsVariableInParameterScope = false;
-    if (needsArguments) {
+    if (m_needsArguments) {
         // If "arguments" is overridden by a function or destructuring parameter name, then it's
         // OK for us to call createVariable() because it won't change anything. It's also OK for
         // us to them tell BytecodeGenerator::generate() to write to it because it will do so
@@ -717,6 +714,8 @@ IGNORE_GCC_WARNINGS_END
             continue;
         if (shouldCreateArgumentsVariableInParameterScope && entry.key.get() == propertyNames().arguments.impl())
            continue;
+        if (isGeneratorOrAsyncFunctionBodyParseMode(parseMode) && generatorOrAsyncWrapperFunctionParameterNames->contains(entry.key.get()))
+            continue;
         createVariable(Identifier::fromUid(m_vm, entry.key.get()), varKind(entry.key.get()), functionSymbolTable, IgnoreExisting);
     }
 
@@ -726,17 +725,7 @@ IGNORE_GCC_WARNINGS_END
 
     switch (parseMode) {
     case SourceParseMode::GeneratorWrapperFunctionMode:
-    case SourceParseMode::GeneratorWrapperMethodMode: {
-        m_generatorRegister = addVar();
-
-        // FIXME: Emit to_this only when Generator uses it.
-        // https://bugs.webkit.org/show_bug.cgi?id=151586
-        emitToThis();
-
-        emitCreateGenerator(m_generatorRegister, &m_calleeRegister);
-        break;
-    }
-
+    case SourceParseMode::GeneratorWrapperMethodMode:
     case SourceParseMode::AsyncGeneratorWrapperMethodMode:
     case SourceParseMode::AsyncGeneratorWrapperFunctionMode: {
         m_generatorRegister = addVar();
@@ -745,7 +734,6 @@ IGNORE_GCC_WARNINGS_END
         // https://bugs.webkit.org/show_bug.cgi?id=151586
         emitToThis();
 
-        emitCreateAsyncGenerator(m_generatorRegister, &m_calleeRegister);
         break;
     }
@@ -878,6 +866,11 @@ IGNORE_GCC_WARNINGS_END
         if (m_scopeNode->needsNewTargetRegisterForThisScope())
             emitLoadNewTargetFromArrowFunctionLexicalEnvironment();
     }
+
+    if (isGeneratorWrapperParseMode(parseMode))
+        emitCreateGenerator(m_generatorRegister, &m_calleeRegister);
+    else if (isAsyncGeneratorWrapperParseMode(parseMode))
+        emitCreateAsyncGenerator(m_generatorRegister, &m_calleeRegister);
 
     // Set up the lexical environment scope as the generator frame. We store the saved and resumed generator registers into this scope with the symbol keys.
     // Since they are symbol keyed, these variables cannot be reached from the usual code.
@@ -901,7 +894,7 @@ IGNORE_GCC_WARNINGS_END
     pushLexicalScope(m_scopeNode, ScopeType::LetConstScope, TDZCheckOptimization::Optimize, NestedScopeType::IsNotNested, nullptr, shouldInitializeBlockScopedFunctions);
 }
 
-BytecodeGenerator::BytecodeGenerator(VM& vm, EvalNode* evalNode, UnlinkedEvalCodeBlock* codeBlock, OptionSet<CodeGenerationMode> codeGenerationMode, const RefPtr<TDZEnvironmentLink>& parentScopeTDZVariables, const PrivateNameEnvironment* parentPrivateNameEnvironment)
+BytecodeGenerator::BytecodeGenerator(VM& vm, EvalNode* evalNode, UnlinkedEvalCodeBlock* codeBlock, OptionSet<CodeGenerationMode> codeGenerationMode, const RefPtr<TDZEnvironmentLink>& parentScopeTDZVariables, const FixedVector<Identifier>*, const PrivateNameEnvironment* parentPrivateNameEnvironment)
     : BytecodeGeneratorBase(makeUnique<UnlinkedCodeBlockGenerator>(vm, codeBlock), CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters())
     , m_codeGenerationMode(codeGenerationMode)
     , m_scopeNode(evalNode)
@@ -939,7 +932,7 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, EvalNode* evalNode, UnlinkedEvalCod
     for (auto& entry : varDeclarations) {
         ASSERT(entry.value.isVar());
         ASSERT(entry.key->isAtom() || entry.key->isSymbol());
-        if (entry.value.isSloppyModeHoistingCandidate())
+        if (entry.value.isSloppyModeHoistedFunction())
             hoistedFunctions.append(Identifier::fromUid(m_vm, entry.key.get()));
         else if (!entry.value.isFunction())
             variables.append(Identifier::fromUid(m_vm, entry.key.get()));
@@ -965,7 +958,7 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, EvalNode* evalNode, UnlinkedEvalCod
     pushLexicalScope(m_scopeNode, ScopeType::LetConstScope, TDZCheckOptimization::Optimize, NestedScopeType::IsNotNested, nullptr, shouldInitializeBlockScopedFunctions);
 }
 
-BytecodeGenerator::BytecodeGenerator(VM& vm, ModuleProgramNode* moduleProgramNode, UnlinkedModuleProgramCodeBlock* codeBlock, OptionSet<CodeGenerationMode> codeGenerationMode, const RefPtr<TDZEnvironmentLink>& parentScopeTDZVariables, const PrivateNameEnvironment*)
+BytecodeGenerator::BytecodeGenerator(VM& vm, ModuleProgramNode* moduleProgramNode, UnlinkedModuleProgramCodeBlock* codeBlock, OptionSet<CodeGenerationMode> codeGenerationMode, const RefPtr<TDZEnvironmentLink>& parentScopeTDZVariables, const FixedVector<Identifier>*, const PrivateNameEnvironment*)
     : BytecodeGeneratorBase(makeUnique<UnlinkedCodeBlockGenerator>(vm, codeBlock), CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters())
     , m_codeGenerationMode(codeGenerationMode)
     , m_scopeNode(moduleProgramNode)
@@ -1010,7 +1003,7 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, ModuleProgramNode* moduleProgramNod
     };
 
     if (moduleProgramNode->usesAwait()) {
-        m_isAsync = true;
+        m_needsGeneratorification = true;
         initializeNextParameter(); // |this|
         for (unsigned i = 0; i < JSGenerator::Argument::NumberOfArguments; ++i)
             initializeNextParameter();
@@ -1071,8 +1064,8 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, ModuleProgramNode* moduleProgramNod
     // So it should be called after putting our lexical environment to the TDZ stack correctly.
     for (FunctionMetadataNode* function : moduleProgramNode->functionStack()) {
-        const auto& iterator = moduleProgramNode->varDeclarations().find(function->ident().impl());
-        RELEASE_ASSERT(iterator != moduleProgramNode->varDeclarations().end());
+        const auto& iterator = moduleProgramNode->lexicalVariables().find(function->ident().impl());
+        RELEASE_ASSERT(iterator != moduleProgramNode->lexicalVariables().end());
         RELEASE_ASSERT(!iterator->value.isImported());
 
         VarKind varKind = lookUpVarKind(iterator->key.get(), iterator->value);
@@ -1184,6 +1177,9 @@ void BytecodeGenerator::initializeDefaultParameterValuesAndSetupFunctionScopeSta
         parameter.first->bindValue(*this, temp.get());
     }
 
+    if (m_restParameter)
+        m_restParameter->emit(*this);
+
     // Final act of weirdness for default parameters. If a "var" also
     // has the same name as a parameter, it should start out as the
     // value of that parameter. Note, though, that they will be distinct
@@ -1537,6 +1533,11 @@ void BytecodeGenerator::emitJumpIfNotFunctionApply(RegisterID* cond, Label& targ
     OpJneqPtr::emit(this, cond, moveLinkTimeConstant(nullptr, LinkTimeConstant::applyFunction), target.bind(this));
 }
 
+void BytecodeGenerator::emitJumpIfNotEvalFunction(RegisterID* cond, Label& target)
+{
+    OpJneqPtr::emit(this, cond, moveLinkTimeConstant(nullptr, LinkTimeConstant::evalFunction), target.bind(this));
+}
+
 void BytecodeGenerator::emitJumpIfEmptyPropertyNameEnumerator(RegisterID* cond, Label& target)
 {
     OpJeqPtr::emit(this, cond, moveLinkTimeConstant(nullptr, LinkTimeConstant::emptyPropertyNameEnumerator), target.bind(this));
@@ -2196,19 +2197,28 @@ void BytecodeGenerator::initializeBlockScopedFunctions(VariableEnvironment& envi
     }
 }
 
-void BytecodeGenerator::hoistSloppyModeFunctionIfNecessary(const Identifier& functionName)
+void BytecodeGenerator::hoistSloppyModeFunctionIfNecessary(FunctionMetadataNode* metadata)
 {
-    if (m_scopeNode->hasSloppyModeHoistedFunction(functionName.impl())) {
-        if (codeType() != EvalCode) {
-            Variable currentFunctionVariable = variable(functionName);
-            RefPtr<RegisterID> currentValue;
-            if (RegisterID* local = currentFunctionVariable.local())
-                currentValue = local;
-            else {
-                RefPtr<RegisterID> scope = emitResolveScope(nullptr, currentFunctionVariable);
-                currentValue = emitGetFromScope(newTemporary(), scope.get(), currentFunctionVariable, DoNotThrowIfNotFound);
-            }
+    if (metadata->isSloppyModeHoistedFunction()) {
+        const Identifier& functionName = metadata->ident();
+        if (isGeneratorOrAsyncFunctionBodyParseMode(parseMode())) {
+            RELEASE_ASSERT(m_generatorOrAsyncWrapperFunctionParameterNames);
+            if (m_generatorOrAsyncWrapperFunctionParameterNames->contains(functionName))
+                return;
+        }
+
+        Variable currentFunctionVariable = variable(functionName);
+        RefPtr<RegisterID> currentValue;
+        if (RegisterID* local = currentFunctionVariable.local())
+            currentValue = local;
+        else {
+            RefPtr<RegisterID> scope = emitResolveScope(nullptr, currentFunctionVariable);
+            currentValue = emitGetFromScope(newTemporary(), scope.get(), currentFunctionVariable, DoNotThrowIfNotFound);
+        }
+
+        switch (codeType()) {
+        case FunctionCode: {
             ASSERT(m_varScopeLexicalScopeStackIndex);
             ASSERT(*m_varScopeLexicalScopeStackIndex < m_lexicalScopeStack.size());
             LexicalScopeStackEntry varScope = m_lexicalScopeStack[*m_varScopeLexicalScopeStackIndex];
@@ -2229,16 +2239,10 @@ void BytecodeGenerator::hoistSloppyModeFunctionIfNecessary(const Identifier& fun
         RELEASE_ASSERT(!entry.isNull());
         bool isLexicallyScoped = false;
         emitPutToScope(varScope.m_scope, variableForLocalEntry(functionName, entry, varScope.m_symbolTableConstantIndex, isLexicallyScoped), currentValue.get(), DoNotThrowIfNotFound, InitializationMode::NotInitialization);
-        } else {
-            Variable currentFunctionVariable = variable(functionName);
-            RefPtr<RegisterID> currentValue;
-            if (RegisterID* local = currentFunctionVariable.local())
-                currentValue = local;
-            else {
-                RefPtr<RegisterID> scope = emitResolveScope(nullptr, currentFunctionVariable);
-                currentValue = emitGetFromScope(newTemporary(), scope.get(), currentFunctionVariable, DoNotThrowIfNotFound);
-            }
-
+            break;
+        }
+        case GlobalCode:
+        case EvalCode: {
             RefPtr<RegisterID> scopeId = emitResolveScopeForHoistingFuncDeclInEval(nullptr, functionName);
             RefPtr<RegisterID> checkResult = emitIsUndefined(newTemporary(), scopeId.get());
@@ -2248,15 +2252,16 @@ void BytecodeGenerator::hoistSloppyModeFunctionIfNecessary(const Identifier& fun
             // Put to outer scope
             emitPutToScopeDynamic(scopeId.get(), functionName, currentValue.get(), DoNotThrowIfNotFound, InitializationMode::NotInitialization);
             emitLabel(isNotVarScopeLabel.get());
-
+            break;
+        }
+        case ModuleCode:
+            RELEASE_ASSERT_NOT_REACHED();
         }
     }
 }
 
 RegisterID* BytecodeGenerator::emitResolveScopeForHoistingFuncDeclInEval(RegisterID* dst, const Identifier& property)
 {
-    ASSERT(m_codeType == EvalCode);
-
     RefPtr<RegisterID> result = finalDestination(dst);
     RefPtr<RegisterID> scope = newTemporary();
     OpGetScope::emit(this, scope.get());
@@ -2330,24 +2335,26 @@ void BytecodeGenerator::prepareLexicalScopeForNextForLoopIteration(VariableEnvir
     RegisterID* loopScope = stackEntry.m_scope;
     ASSERT(symbolTable->scopeSize());
     ASSERT(loopScope);
-    Vector<std::pair<RegisterID*, Identifier>> activationValuesToCopyOver;
-    {
-        activationValuesToCopyOver.reserveInitialCapacity(symbolTable->scopeSize());
+    struct SymbolTableNoLocks {
+        auto begin() const { return symbolTable->begin(NoLockingNecessary); }
+        auto end() const { return symbolTable->end(NoLockingNecessary); }
+        size_t size() const { return symbolTable->scopeSize(); }
+        SymbolTable* symbolTable;
+    } symbolTableWithoutLocks { symbolTable };
 
-        for (auto end = symbolTable->end(NoLockingNecessary), ptr = symbolTable->begin(NoLockingNecessary); ptr != end; ++ptr) {
-            if (!ptr->value.varOffset().isScope())
-                continue;
+    auto activationValuesToCopyOver = WTF::compactMap(symbolTableWithoutLocks, [&](auto& pair) -> std::optional<std::pair<RegisterID*, Identifier>> {
+        if (!pair.value.varOffset().isScope())
+            return std::nullopt;
 
-            RefPtr<UniquedStringImpl> ident = ptr->key;
-            Identifier identifier = Identifier::fromUid(m_vm, ident.get());
+        RefPtr<UniquedStringImpl> ident = pair.key;
+        auto identifier = Identifier::fromUid(m_vm, ident.get());
 
-            RegisterID* transitionValue = newBlockScopeVariable();
-            transitionValue->ref();
-            emitGetFromScope(transitionValue, loopScope, variableForLocalEntry(identifier, ptr->value, loopSymbolTable->index(), true), DoNotThrowIfNotFound);
-            activationValuesToCopyOver.uncheckedAppend(std::make_pair(transitionValue, identifier));
-        }
-    }
+        auto* transitionValue = newBlockScopeVariable();
+        transitionValue->ref();
+        emitGetFromScope(transitionValue, loopScope, variableForLocalEntry(identifier, pair.value, loopSymbolTable->index(), true), DoNotThrowIfNotFound);
+        return std::make_pair(transitionValue, identifier);
+    });
 
     // We need this dynamic behavior of the executing code to ensure
     // each loop iteration has a new activation object. (It's pretty ugly).
@@ -2480,6 +2487,28 @@ void BytecodeGenerator::createVariable(
     }
 }
 
+std::optional<Variable> BytecodeGenerator::tryResolveVariable(ExpressionNode* expr)
+{
+    if (expr->isResolveNode())
+        return variable(static_cast<ResolveNode*>(expr)->identifier());
+
+    if (expr->isAssignResolveNode())
+        return variable(static_cast<AssignResolveNode*>(expr)->identifier());
+
+    if (expr->isThisNode()) {
+        // After generator.ensureThis (which must be invoked in |base|'s materialization), we can ensure that |this| is in local this-register.
+        return variable(propertyNames().builtinNames().thisPrivateName(), ThisResolutionType::Local);
+    }
+
+    if (expr->isCommaNode()) {
+        CommaNode* node = static_cast<CommaNode*>(expr);
+        for (; node->next(); node = node->next()) { }
+        return tryResolveVariable(node->expr());
+    }
+
+    return std::nullopt;
+}
+
 RegisterID* BytecodeGenerator::emitOverridesHasInstance(RegisterID* dst, RegisterID* constructor, RegisterID* hasInstanceValue)
 {
     OpOverridesHasInstance::emit(this, dst, constructor, hasInstanceValue);
@@ -3157,6 +3186,16 @@ void BytecodeGenerator::pushTDZVariables(const VariableEnvironment& environment,
     m_TDZStack.append(TDZStackEntry { WTFMove(map), nullptr });
 }
 
+Vector<Identifier> BytecodeGenerator::getParameterNames() const
+{
+    RELEASE_ASSERT(m_scopeNode->isFunctionNode());
+    FunctionParameters& parameters = *static_cast<FunctionNode*>(m_scopeNode)->parameters();
+    Vector<Identifier> parameterNames;
+    for (unsigned i = 0; i < parameters.size(); i++)
+        parameters.at(i).first->collectBoundIdentifiers(parameterNames);
+    return parameterNames;
+}
+
 std::optional<PrivateNameEnvironment> BytecodeGenerator::getAvailablePrivateAccessNames()
 {
     PrivateNameEnvironment result;
@@ -3404,7 +3443,7 @@ RegisterID* BytecodeGenerator::emitNewDefaultConstructor(RegisterID* dst, Constr
     return dst;
 }
 
-RegisterID* BytecodeGenerator::emitNewClassFieldInitializerFunction(RegisterID* dst, Vector<JSTextPosition>&& classFieldLocations, bool isDerived)
+RegisterID* BytecodeGenerator::emitNewClassFieldInitializerFunction(RegisterID* dst, Vector<UnlinkedFunctionExecutable::ClassElementDefinition>&& classElementDefinitions, bool isDerived)
 {
     DerivedContextType newDerivedContextType;
     SuperBinding superBinding;
@@ -3423,8 +3462,8 @@ RegisterID* BytecodeGenerator::emitNewClassFieldInitializerFunction(RegisterID*
     FunctionMetadataNode metadata(parserArena(), JSTokenLocation(), JSTokenLocation(), 0, 0, 0, 0, 0, ImplementationVisibility::Private, StrictModeLexicalFeature, ConstructorKind::None, superBinding, 0, parseMode, false);
     metadata.finishParsing(m_scopeNode->source(), Identifier(), FunctionMode::MethodDefinition);
 
-    auto initializer = UnlinkedFunctionExecutable::create(m_vm, m_scopeNode->source(), &metadata, isBuiltinFunction() ? UnlinkedBuiltinFunction : UnlinkedNormalFunction, constructAbility, scriptMode(), WTFMove(variablesUnderTDZ), WTFMove(parentPrivateNameEnvironment), newDerivedContextType, NeedsClassFieldInitializer::No, PrivateBrandRequirement::None);
-    initializer->setClassFieldLocations(WTFMove(classFieldLocations));
+    auto initializer = UnlinkedFunctionExecutable::create(m_vm, m_scopeNode->source(), &metadata, isBuiltinFunction() ? UnlinkedBuiltinFunction : UnlinkedNormalFunction, constructAbility, InlineAttribute::Always, scriptMode(), WTFMove(variablesUnderTDZ), { }, WTFMove(parentPrivateNameEnvironment), newDerivedContextType, NeedsClassFieldInitializer::No, PrivateBrandRequirement::None);
+    initializer->setClassElementDefinitions(WTFMove(classElementDefinitions));
 
     unsigned index = m_codeBlock->addFunctionExpr(initializer);
 
     OpNewFuncExp::emit(this, dst, scopeRegister(), index);
@@ -3576,6 +3615,7 @@ RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, Expec
         ArgumentListNode* n = callArguments.argumentsNode()->m_listNode;
         if (n && n->m_expr->isSpreadExpression()) {
             RELEASE_ASSERT(!n->m_next);
+            RELEASE_ASSERT(opcodeID != op_call_direct_eval);
             auto expression = static_cast<SpreadExpressionNode*>(n->m_expr)->expression();
             if (expression->isArrayLiteral()) {
                 auto* elements = static_cast<ArrayNode*>(expression)->elements();
@@ -4885,9 +4925,9 @@ void BytecodeGenerator::emitYieldPoint(RegisterID* argument, JSAsyncGenerator::A
     emitLabel(mergePoint.get());
 }
 
-RegisterID* BytecodeGenerator::emitYield(RegisterID* argument, JSAsyncGenerator::AsyncGeneratorSuspendReason result)
+RegisterID* BytecodeGenerator::emitYield(RegisterID* argument)
 {
-    emitYieldPoint(argument, result);
+    emitYieldPoint(argument, JSAsyncGenerator::AsyncGeneratorSuspendReason::Yield);
 
     Ref