Commit e8946133 authored by Kevin Velghe

New upstream version 5.212.0-alpha

parent 8bef6983
@@ -149,7 +149,19 @@ public:
void setPrivate(void* data);
void* getPrivate();
+// FIXME: We should fix the warnings for extern-template in JSObject template classes: https://bugs.webkit.org/show_bug.cgi?id=161979
+#if COMPILER(CLANG)
+#if __has_warning("-Wundefined-var-template")
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wundefined-var-template"
+#endif
+#endif
DECLARE_INFO;
+#if COMPILER(CLANG)
+#if __has_warning("-Wundefined-var-template")
+#pragma clang diagnostic pop
+#endif
+#endif
JSClassRef classRef() const { return m_callbackObjectData->jsClass; }
bool inherits(JSClassRef) const;
@@ -37,7 +37,7 @@ using namespace WTF::Unicode;
JSStringRef JSStringCreateWithCharacters(const JSChar* chars, size_t numChars)
{
initializeThreading();
-return &OpaqueJSString::create(chars, numChars).leakRef();
+return &OpaqueJSString::create(reinterpret_cast<const UChar*>(chars), numChars).leakRef();
}
JSStringRef JSStringCreateWithUTF8CString(const char* string)
@@ -62,7 +62,7 @@ JSStringRef JSStringCreateWithUTF8CString(const char* string)
JSStringRef JSStringCreateWithCharactersNoCopy(const JSChar* chars, size_t numChars)
{
initializeThreading();
-return OpaqueJSString::create(StringImpl::createWithoutCopying(chars, numChars)).leakRef();
+return OpaqueJSString::create(StringImpl::createWithoutCopying(reinterpret_cast<const UChar*>(chars), numChars)).leakRef();
}
JSStringRef JSStringRetain(JSStringRef string)
@@ -87,7 +87,7 @@ const JSChar* JSStringGetCharactersPtr(JSStringRef string)
{
if (!string)
return nullptr;
-return string->characters();
+return reinterpret_cast<const JSChar*>(string->characters());
}
size_t JSStringGetMaximumUTF8CStringSize(JSStringRef string)
@@ -1092,12 +1092,12 @@ public:
void zeroExtend16To32(RegisterID src, RegisterID dest)
{
-m_assembler.uxth<64>(dest, src);
+m_assembler.uxth<32>(dest, src);
}
void signExtend16To32(RegisterID src, RegisterID dest)
{
-m_assembler.sxth<64>(dest, src);
+m_assembler.sxth<32>(dest, src);
}
void load8(ImplicitAddress address, RegisterID dest)
@@ -1152,12 +1152,12 @@ public:
void zeroExtend8To32(RegisterID src, RegisterID dest)
{
-m_assembler.uxtb<64>(dest, src);
+m_assembler.uxtb<32>(dest, src);
}
void signExtend8To32(RegisterID src, RegisterID dest)
{
-m_assembler.sxtb<64>(dest, src);
+m_assembler.sxtb<32>(dest, src);
}
void store64(RegisterID src, ImplicitAddress address)
@@ -138,6 +138,7 @@ private:
normalResult->setPhi(phi);
zeroResult->setPhi(phi);
m_value->replaceWithIdentity(phi);
+before->updatePredecessorsAfter();
m_changed = true;
} else
makeDivisionChill(Mod);
@@ -42,7 +42,7 @@
namespace JSC { namespace B3 {
template<typename T>
-T* Value::as()
+inline T* Value::as()
{
if (T::accepts(opcode()))
return static_cast<T*>(this);
@@ -50,7 +50,7 @@ T* Value::as()
}
template<typename T>
-const T* Value::as() const
+inline const T* Value::as() const
{
return const_cast<Value*>(this)->as<T>();
}
@@ -204,10 +204,11 @@ inline JITArrayMode jitArrayModeForStructure(Structure* structure)
struct ByValInfo {
ByValInfo() { }
-ByValInfo(unsigned bytecodeIndex, CodeLocationJump notIndexJump, CodeLocationJump badTypeJump, JITArrayMode arrayMode, ArrayProfile* arrayProfile, int16_t badTypeJumpToDone, int16_t badTypeJumpToNextHotPath, int16_t returnAddressToSlowPath)
+ByValInfo(unsigned bytecodeIndex, CodeLocationJump notIndexJump, CodeLocationJump badTypeJump, CodeLocationLabel exceptionHandler, JITArrayMode arrayMode, ArrayProfile* arrayProfile, int16_t badTypeJumpToDone, int16_t badTypeJumpToNextHotPath, int16_t returnAddressToSlowPath)
: bytecodeIndex(bytecodeIndex)
, notIndexJump(notIndexJump)
, badTypeJump(badTypeJump)
+, exceptionHandler(exceptionHandler)
, arrayMode(arrayMode)
, arrayProfile(arrayProfile)
, badTypeJumpToDone(badTypeJumpToDone)
@@ -223,6 +224,7 @@ struct ByValInfo {
unsigned bytecodeIndex;
CodeLocationJump notIndexJump;
CodeLocationJump badTypeJump;
+CodeLocationLabel exceptionHandler;
JITArrayMode arrayMode; // The array mode that was baked into the inline JIT code.
ArrayProfile* arrayProfile;
int16_t badTypeJumpToDone;
@@ -375,7 +375,7 @@ RegisterID* ArrayNode::emitBytecode(BytecodeGenerator& generator, RegisterID* ds
handleSpread:
RefPtr<RegisterID> index = generator.emitLoad(generator.newTemporary(), jsNumber(length));
-auto spreader = [this, array, index](BytecodeGenerator& generator, RegisterID* value)
+auto spreader = [array, index](BytecodeGenerator& generator, RegisterID* value)
{
generator.emitDirectPutByVal(array.get(), index.get(), value);
generator.emitInc(index.get());
@@ -59,4 +59,6 @@ private:
}; // namespace JSC
+using JSC::SetForScope;
#endif // SetForScope_h
@@ -148,7 +148,6 @@ private:
for (Node* node : *block) {
switch (node->op()) {
case GetFromArguments:
-DFG_ASSERT(m_graph, node, node->child1()->op() == CreateDirectArguments);
break;
case GetByVal:
@@ -562,8 +562,14 @@ private:
{
flushDirect(operand, findArgumentPosition(operand));
}
void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
{
+addFlushOrPhantomLocal<Flush>(operand, argumentPosition);
+}
+template<NodeType nodeType>
+void addFlushOrPhantomLocal(VirtualRegister operand, ArgumentPosition* argumentPosition)
+{
ASSERT(!operand.isConstant());
@@ -576,12 +582,17 @@ private:
else
variable = newVariableAccessData(operand);
-node = addToGraph(Flush, OpInfo(variable));
+node = addToGraph(nodeType, OpInfo(variable));
m_currentBlock->variablesAtTail.operand(operand) = node;
if (argumentPosition)
argumentPosition->addVariable(variable);
}
+void phantomLocalDirect(VirtualRegister operand)
+{
+addFlushOrPhantomLocal<PhantomLocal>(operand, findArgumentPosition(operand));
+}
void flush(InlineStackEntry* inlineStackEntry)
{
int numArguments;
@@ -602,8 +613,32 @@ private:
void flushForTerminal()
{
-for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller)
+CodeOrigin origin = currentCodeOrigin();
+unsigned bytecodeIndex = origin.bytecodeIndex;
+for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller) {
flush(inlineStackEntry);
+ASSERT(origin.inlineCallFrame == inlineStackEntry->m_inlineCallFrame);
+InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame;
+CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
+FullBytecodeLiveness& fullLiveness = m_graph.livenessFor(codeBlock);
+const FastBitVector& livenessAtBytecode = fullLiveness.getLiveness(bytecodeIndex);
+for (unsigned local = codeBlock->m_numCalleeLocals; local--;) {
+if (livenessAtBytecode.get(local)) {
+VirtualRegister reg = virtualRegisterForLocal(local);
+if (inlineCallFrame)
+reg = inlineStackEntry->remapOperand(reg);
+phantomLocalDirect(reg);
+}
+}
+if (inlineCallFrame) {
+bytecodeIndex = inlineCallFrame->directCaller.bytecodeIndex;
+origin = inlineCallFrame->directCaller;
+}
+}
}
void flushForReturn()
@@ -3739,6 +3774,8 @@ bool ByteCodeParser::parseBlock(unsigned limit)
Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
bool compiledAsGetById = false;
+GetByIdStatus getByIdStatus;
+unsigned identifierNumber = 0;
{
ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
ByValInfo* byValInfo = m_inlineStackTop->m_byValInfos.get(CodeOrigin(currentCodeOrigin().bytecodeIndex));
@@ -3746,20 +3783,20 @@ bool ByteCodeParser::parseBlock(unsigned limit)
// At that time, there is no information.
if (byValInfo && byValInfo->stubInfo && !byValInfo->tookSlowPath && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent)) {
compiledAsGetById = true;
-unsigned identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl());
+identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl());
UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
addToGraph(CheckIdent, OpInfo(uid), property);
-GetByIdStatus getByIdStatus = GetByIdStatus::computeForStubInfo(
+getByIdStatus = GetByIdStatus::computeForStubInfo(
locker, m_inlineStackTop->m_profiledBlock,
byValInfo->stubInfo, currentCodeOrigin(), uid);
-handleGetById(currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
}
}
-if (!compiledAsGetById) {
+if (compiledAsGetById)
+handleGetById(currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
+else {
ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Read);
Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
m_exitOK = false; // GetByVal must be treated as if it clobbers exit state, since FixupPhase may make it generic.
@@ -204,6 +204,10 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
read(JSObject_butterfly);
ArrayMode mode = node->arrayMode();
switch (mode.type()) {
+case Array::ForceExit: {
+write(SideState);
+return;
+}
case Array::Int32: {
if (mode.isInBounds()) {
read(Butterfly_publicLength);
@@ -844,7 +848,9 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
read(JSObject_butterfly);
AbstractHeap heap(NamedProperties, node->multiGetByOffsetData().identifierNumber);
read(heap);
-def(HeapLocation(NamedPropertyLoc, heap, node->child1()), LazyNode(node));
+// FIXME: We cannot def() for MultiGetByOffset because CSE is not smart enough to decay it
+// to a CheckStructure.
+// https://bugs.webkit.org/show_bug.cgi?id=159859
return;
}
@@ -3815,8 +3815,6 @@ void SpeculativeJIT::compile(Node* node)
}
case GetById: {
-ASSERT(node->prediction());
switch (node->child1().useKind()) {
case CellUse: {
SpeculateCellOperand base(this, node->child1());
@@ -3877,8 +3877,6 @@ void SpeculativeJIT::compile(Node* node)
break;
}
case GetById: {
-ASSERT(node->prediction());
switch (node->child1().useKind()) {
case CellUse: {
SpeculateCellOperand base(this, node->child1());
@@ -67,7 +67,7 @@ State::State(Graph& graph)
proc = std::make_unique<Procedure>();
proc->setOriginPrinter(
-[this] (PrintStream& out, B3::Origin origin) {
+[] (PrintStream& out, B3::Origin origin) {
out.print("DFG:", bitwise_cast<Node*>(origin.data()));
});
@@ -671,27 +671,33 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
for (unsigned i = m_putByIds.size(); i--;)
m_putByIds[i].finalize(patchBuffer);
-for (const auto& byValCompilationInfo : m_byValCompilationInfo) {
-PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump;
-CodeLocationJump notIndexJump = CodeLocationJump();
-if (Jump(patchableNotIndexJump).isSet())
-notIndexJump = CodeLocationJump(patchBuffer.locationOf(patchableNotIndexJump));
-CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(byValCompilationInfo.badTypeJump));
-CodeLocationLabel doneTarget = patchBuffer.locationOf(byValCompilationInfo.doneTarget);
-CodeLocationLabel nextHotPathTarget = patchBuffer.locationOf(byValCompilationInfo.nextHotPathTarget);
-CodeLocationLabel slowPathTarget = patchBuffer.locationOf(byValCompilationInfo.slowPathTarget);
-CodeLocationCall returnAddress = patchBuffer.locationOf(byValCompilationInfo.returnAddress);
-*byValCompilationInfo.byValInfo = ByValInfo(
-byValCompilationInfo.bytecodeIndex,
-notIndexJump,
-badTypeJump,
-byValCompilationInfo.arrayMode,
-byValCompilationInfo.arrayProfile,
-differenceBetweenCodePtr(badTypeJump, doneTarget),
-differenceBetweenCodePtr(badTypeJump, nextHotPathTarget),
-differenceBetweenCodePtr(returnAddress, slowPathTarget));
+if (m_byValCompilationInfo.size()) {
+CodeLocationLabel exceptionHandler = patchBuffer.locationOf(m_exceptionHandler);
+for (const auto& byValCompilationInfo : m_byValCompilationInfo) {
+PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump;
+CodeLocationJump notIndexJump = CodeLocationJump();
+if (Jump(patchableNotIndexJump).isSet())
+notIndexJump = CodeLocationJump(patchBuffer.locationOf(patchableNotIndexJump));
+CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(byValCompilationInfo.badTypeJump));
+CodeLocationLabel doneTarget = patchBuffer.locationOf(byValCompilationInfo.doneTarget);
+CodeLocationLabel nextHotPathTarget = patchBuffer.locationOf(byValCompilationInfo.nextHotPathTarget);
+CodeLocationLabel slowPathTarget = patchBuffer.locationOf(byValCompilationInfo.slowPathTarget);
+CodeLocationCall returnAddress = patchBuffer.locationOf(byValCompilationInfo.returnAddress);
+*byValCompilationInfo.byValInfo = ByValInfo(
+byValCompilationInfo.bytecodeIndex,
+notIndexJump,
+badTypeJump,
+exceptionHandler,
+byValCompilationInfo.arrayMode,
+byValCompilationInfo.arrayProfile,
+differenceBetweenCodePtr(badTypeJump, doneTarget),
+differenceBetweenCodePtr(badTypeJump, nextHotPathTarget),
+differenceBetweenCodePtr(returnAddress, slowPathTarget));
+}
}
for (unsigned i = 0; i < m_callCompilationInfo.size(); ++i) {
CallCompilationInfo& compilationInfo = m_callCompilationInfo[i];
CallLinkInfo& info = *compilationInfo.callLinkInfo;
@@ -763,7 +769,8 @@ void JIT::privateCompileExceptionHandlers()
jumpToExceptionHandle