Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP

Comparing changes

Choose two branches to see what's changed or to start a new pull request. If you need to, you can also compare across forks.

Open a pull request

Create a new pull request by comparing changes across two branches. If you need to, you can also compare across forks.
base fork: facebook/hhvm
...
head fork: facebook/hhvm
Checking mergeability… Don't worry, you can still create the pull request.
  • 5 commits
  • 23 files changed
  • 0 commit comments
  • 5 contributors
Commits on Sep 27, 2012
@markw65 markw65 Streamline Unit::merge
Summary:
We're still spending a lot of time in Unit::merge.
This optimizes defClass and defFunc for the case
where the class/func is known to be unique. It also
fixes various issues in the emitter so that builtin
classes and functions, closures and continuations
are all marked as unique.
20fc5e2
@swtaarrs swtaarrs Fix order of assignment/decref in continuations and Variant
Summary:
Similar to D563995, this fixes more code that assigns a PHP
value so the new value is stored to memory before decreffing the old
value. It's more complicated but unfortunately necessary for
correctness.
7c67d64
@jdelong jdelong Turn pause_and_exit() into pause_forever()
Summary:
pthread_exit can lead to confusing crashes for us: it raises
a "forced unwind", which it seems (strangely) will enter into C++
catch (...) blocks: but if you don't rethrow from one of these blocks,
glibc will call abort() from a function called unwind_cleanup.  Our
LibEventWorker has a catch block of this sort that doesn't rethrow, so
calling pthread_exit below there will abort the program.  (The point
of this function is to just wait while another thread writes a
stacktrace file, so there's no reason to exit the thread.)
a740eb0
psnell Fixed class_implements. dad7911
@paroski paroski Improve the perf of c_Map::reserve
Summary: Improve the code for growing a Map to be more efficient.
08b4a8a
View
64 src/compiler/analysis/emitter.cpp
@@ -1023,9 +1023,17 @@ void MetaInfoBuilder::setForUnit(UnitEmitter& target) const {
free(meta);
}
-static StringData* continuationClassName(const StringData* fname) {
+StringData* EmitterVisitor::continuationClassName(
+ const StringData* fname) {
std::ostringstream str;
- str << "continuation$" << fname->data();
+ str << "continuation$"
+ << '$'
+ << std::hex
+ << m_curFunc->ue().md5().q[1] << m_curFunc->ue().md5().q[0]
+ << std::dec
+ << '$'
+ << fname->data();
+
return StringData::GetStaticString(str.str());
}
@@ -3400,7 +3408,7 @@ bool EmitterVisitor::visitImpl(ConstructPtr node) {
const Location* sLoc = ce->getLocation().get();
PreClassEmitter* pce = m_ue.newPreClassEmitter(className,
PreClass::NotHoistable);
- pce->init(sLoc->line0, sLoc->line1, m_ue.bcPos(), AttrNone,
+ pce->init(sLoc->line0, sLoc->line1, m_ue.bcPos(), AttrUnique,
parentName, NULL);
e.DefCls(pce->id());
@@ -4741,6 +4749,9 @@ void EmitterVisitor::emitPostponedMeths() {
attrs = (Attr)(attrs | AttrNoOverride);
}
}
+ } else if (!SystemLib::s_inited) {
+ // we're building systemlib. everything is unique
+ attrs = (Attr)(attrs | AttrUnique);
}
// For closures, the MethodStatement didn't have real attributes; enforce
@@ -4869,7 +4880,7 @@ void EmitterVisitor::newContinuationClass(const StringData* name) {
StringData::GetStaticString("GenericContinuation");
PreClassEmitter* pce = m_ue.newPreClassEmitter(className,
PreClass::AlwaysHoistable);
- pce->init(0, 0, m_ue.bcPos(), AttrNone, parentName, NULL);
+ pce->init(0, 0, m_ue.bcPos(), AttrUnique, parentName, NULL);
}
void EmitterVisitor::emitPostponedCtors() {
@@ -5335,19 +5346,26 @@ PreClass::Hoistable EmitterVisitor::emitClass(Emitter& e, ClassScopePtr cNode,
if (cNode->getUsedTraitNames().size()) {
attr = (Attr)(attr | AttrNoExpandTrait);
}
+ } else if (!SystemLib::s_inited) {
+ // we're building systemlib. everything is unique
+ attr = (Attr)(attr | AttrUnique);
}
+
const Location* sLoc = is->getLocation().get();
const std::vector<std::string>& bases(cNode->getBases());
int firstInterface = cNode->getOriginalParent().empty() ? 0 : 1;
int nInterfaces = bases.size();
PreClass::Hoistable hoistable = PreClass::NotHoistable;
if (toplevel) {
- if (nInterfaces > firstInterface || cNode->getUsedTraitNames().size()) {
- hoistable = PreClass::Mergeable;
- } else if (firstInterface &&
- !m_hoistables.count(cNode->getOriginalParent())) {
- hoistable = PreClass::MaybeHoistable;
- } else {
+ if (SystemLib::s_inited) {
+ if (nInterfaces > firstInterface || cNode->getUsedTraitNames().size()) {
+ hoistable = PreClass::Mergeable;
+ } else if (firstInterface &&
+ !m_hoistables.count(cNode->getOriginalParent())) {
+ hoistable = PreClass::MaybeHoistable;
+ }
+ }
+ if (hoistable == PreClass::NotHoistable) {
hoistable = PreClass::AlwaysHoistable;
m_hoistables.insert(cNode->getOriginalName());
}
@@ -5867,16 +5885,20 @@ StringData* EmitterVisitor::newClosureName() {
if (m_curFunc->pce() != NULL) {
str << m_curFunc->pce()->name()->data();
}
- str << "$";
+ str << '$';
if (m_curFunc->isPseudoMain()) {
- // Pseudo-main. Uniquify via md5.
- str << "__pseudoMain" << std::hex
- << m_curFunc->ue().md5().q[1] << m_curFunc->ue().md5().q[0]
- << std::dec;
+ str << "__pseudoMain";
} else {
str << m_curFunc->name()->data();
}
- str << "$" << m_closureCounter++;
+ /*
+ * Uniquify the name
+ */
+ str << '$'
+ << std::hex
+ << m_curFunc->ue().md5().q[1] << m_curFunc->ue().md5().q[0]
+ << std::dec
+ << '$' << m_closureCounter++;
return StringData::GetStaticString(str.str());
}
@@ -6067,6 +6089,7 @@ static Unit* emitHHBCNativeFuncUnit(const HhbcExtFuncInfo* builtinFuncs,
ue->emitOp(OpNativeImpl);
Offset past = ue->bcPos();
fe->setMaxStackCells(kNumActRecCells + 1);
+ fe->setAttrs(Attr(fe->attrs()|AttrUnique));
fe->finish(past, false);
ue->recordFunction(fe);
}
@@ -6171,6 +6194,13 @@ static Unit* emitHHBCNativeClassUnit(const HhbcExtClassInfo* builtinClasses,
mfe->finish(past, false);
ue->recordFunction(mfe);
+ TypedValue mainReturn;
+ mainReturn.m_data.num = 1;
+ mainReturn.m_type = KindOfBoolean;
+ // _count is the "Unit::isMergeOnly()" flag
+ mainReturn._count = 1;
+ ue->setMainReturn(&mainReturn);
+
MetaInfoBuilder metaInfo;
ContMethMap contMethods;
@@ -6238,7 +6268,7 @@ static Unit* emitHHBCNativeClassUnit(const HhbcExtClassInfo* builtinClasses,
StringData::GetStaticString(e.ci->getParentClass().get());
PreClassEmitter* pce = ue->newPreClassEmitter(e.name,
PreClass::AlwaysHoistable);
- pce->init(0, 0, ue->bcPos(), AttrNone, parentName, NULL);
+ pce->init(0, 0, ue->bcPos(), AttrUnique, parentName, NULL);
pce->setBuiltinClassInfo(e.ci, e.info->m_InstanceCtor, e.info->m_sizeof);
{
ClassInfo::InterfaceVec intfVec = e.ci->getInterfacesVec();
View
1  src/compiler/analysis/emitter.h
@@ -570,6 +570,7 @@ class EmitterVisitor {
void saveMaxStackCells(FuncEmitter* fe);
void finishFunc(Emitter& e, FuncEmitter* fe);
StringData* newClosureName();
+ StringData* continuationClassName(const StringData* fname);
void newContinuationClass(const StringData* name);
void initScalar(TypedValue& tvVal, ExpressionPtr val);
View
15 src/runtime/base/builtin_functions.cpp
@@ -1000,14 +1000,13 @@ Object create_object(CStrRef s, CArrRef params, bool init /* = true */,
}
}
-void pause_and_exit() {
- // NOTE: This is marked as __attribute__((noreturn)) in base/types.h
- // Signal sent, nothing can be trusted, don't do anything, as we might
- // write bad data, including calling exit handlers or destructors until the
- // signal handler (StackTrace) has had a chance to exit.
- sleep(300);
- // Should abort first, but it not try to exit
- pthread_exit(0);
+/*
+ * This function is used when another thread is segfaulting---we just
+ * want to wait forever to give it a chance to write a stacktrace file
+ * (and maybe a core file).
+ */
+void pause_forever() {
+ for (;;) sleep(300);
}
void check_request_surprise(ThreadInfo *info) {
View
6 src/runtime/base/class_info.cpp
@@ -496,6 +496,12 @@ void ClassInfo::getAllParentsVec(ClassVec &parents) const {
}
void ClassInfo::getAllInterfacesVec(InterfaceVec &interfaces) const {
+ CStrRef parent = getParentClass();
+ if (!parent.empty()) {
+ const ClassInfo *info = FindClass(parent);
+ if (info) info->getAllInterfacesVec(interfaces);
+ }
+
const InterfaceVec &ifs = getInterfacesVec();
for (unsigned int i = 0; i < ifs.size(); i++) {
CStrRef intf = ifs[i];
View
21 src/runtime/base/program_functions.cpp
@@ -55,8 +55,6 @@
#include <runtime/base/util/simple_counter.h>
#include <runtime/base/util/extended_logger.h>
-#include <runtime/vm/translator/translator-x64.h>
-
#include <boost/program_options/options_description.hpp>
#include <boost/program_options/positional_options.hpp>
#include <boost/program_options/variables_map.hpp>
@@ -1131,26 +1129,7 @@ void hphp_process_init() {
init_literal_varstrings();
if (hhvm) {
- if (!RuntimeOption::RepoAuthoritative &&
- RuntimeOption::EvalJitEnableRenameFunction &&
- RuntimeOption::EvalJit) {
- VM::Func::enableIntercept();
- VM::Transl::TranslatorX64* tx64 = VM::Transl::TranslatorX64::Get();
- tx64->enableIntercepts();
- }
- bool db = RuntimeOption::EvalDumpBytecode;
- bool p = RuntimeOption::RepoAuthoritative;
- bool rp = RuntimeOption::AlwaysUseRelativePath;
- bool sf = RuntimeOption::SafeFileAccess;
- RuntimeOption::EvalDumpBytecode = false;
- RuntimeOption::RepoAuthoritative = false;
- RuntimeOption::AlwaysUseRelativePath = false;
- RuntimeOption::SafeFileAccess = false;
HPHP::VM::ProcessInit();
- RuntimeOption::EvalDumpBytecode = db;
- RuntimeOption::RepoAuthoritative = p;
- RuntimeOption::AlwaysUseRelativePath = rp;
- RuntimeOption::SafeFileAccess = sf;
}
PageletServer::Restart();
View
215 src/runtime/base/type_variant.cpp
@@ -200,17 +200,21 @@ static void destructRef(RefData *p) { p->release(); }
static void (*destructors[4])(RefData *) =
{destructString, destructArray, destructObject, destructRef};
-inline ALWAYS_INLINE void Variant::destructImpl() {
- ASSERT(!isPrimitive());
+inline ALWAYS_INLINE void Variant::destructDataImpl(RefData* data, DataType t) {
+ ASSERT(IS_REFCOUNTED_TYPE(t));
CT_ASSERT(KindOfString + 1 == KindOfArray &&
KindOfArray + 1 == KindOfObject &&
KindOfObject + 1 == KindOfRef);
- if (m_data.pref->decRefCount() == 0) {
- ASSERT(m_type >= KindOfString && m_type <= KindOfRef);
- destructors[m_type - KindOfString](m_data.pref);
+ if (data->decRefCount() == 0) {
+ ASSERT(t >= KindOfString && t <= KindOfRef);
+ destructors[t - KindOfString](data);
}
}
+inline ALWAYS_INLINE void Variant::destructImpl() {
+ destructDataImpl(m_data.pref, m_type);
+}
+
namespace VM {
HOT_FUNC_VM
@@ -248,6 +252,11 @@ void Variant::destruct() {
}
HOT_FUNC
+void Variant::destructData(RefData* data, DataType t) {
+ destructDataImpl(data, t);
+}
+
+HOT_FUNC
Variant::~Variant() {
if (IS_REFCOUNTED_TYPE(m_type)) destructImpl();
}
@@ -270,147 +279,67 @@ Variant &Variant::setWithRef(CVarRef v, const ArrayData *arr /* = NULL */) {
return *this;
}
-void Variant::setNull() {
- if (isPrimitive()) {
- m_type = KindOfNull;
- } else if (m_type == KindOfRef) {
- m_data.pref->var()->setNull();
- } else {
- destruct();
- m_type = KindOfNull;
- }
-}
-
-HOT_FUNC
-CVarRef Variant::set(bool v) {
- if (isPrimitive()) {
- // do nothing
- } else if (m_type == KindOfRef) {
- m_data.pref->var()->set(v);
- return *this;
- } else {
- destruct();
- }
- m_type = KindOfBoolean;
- m_data.num = (v ? 1 : 0);
- return *this;
-}
-
-CVarRef Variant::set(int v) {
- if (isPrimitive()) {
- // do nothing
- } else if (m_type == KindOfRef) {
- m_data.pref->var()->set(v);
- return *this;
- } else {
- destruct();
- }
- m_type = KindOfInt64;
- m_data.num = v;
- return *this;
-}
-
-HOT_FUNC
-CVarRef Variant::set(int64 v) {
- if (isPrimitive()) {
- // do nothing
- } else if (m_type == KindOfRef) {
- m_data.pref->var()->set(v);
- return *this;
- } else {
- destruct();
- }
- m_type = KindOfInt64;
- m_data.num = v;
- return *this;
-}
-
-CVarRef Variant::set(double v) {
- if (isPrimitive()) {
- // do nothing
- } else if (m_type == KindOfRef) {
- m_data.pref->var()->set(v);
- return *this;
- } else {
- destruct();
- }
- m_type = KindOfDouble;
- m_data.dbl = v;
- return *this;
-}
-
-CVarRef Variant::set(litstr v) {
- if (isPrimitive()) {
- // do nothing
- } else if (m_type == KindOfRef) {
- m_data.pref->var()->set(v);
- return *this;
- } else {
- destruct();
- }
- m_type = KindOfString;
- m_data.pstr = NEW(StringData)(v);
- m_data.pstr->incRefCount();
- return *this;
-}
-
-HOT_FUNC
-CVarRef Variant::set(StringData *v) {
- Variant *self = m_type == KindOfRef ? m_data.pref->var() : this;
- if (UNLIKELY(!v)) {
- self->setNull();
- } else {
- v->incRefCount();
- if (IS_REFCOUNTED_TYPE(self->m_type)) self->destruct();
- self->m_type = v->isStatic() ? KindOfStaticString : KindOfString;
- self->m_data.pstr = v;
- }
- return *this;
-}
-
-CVarRef Variant::set(const StaticString & v) {
- if (isPrimitive()) {
- // do nothing
- } else if (m_type == KindOfRef) {
- m_data.pref->var()->set(v);
- return *this;
- } else {
- destruct();
+#define IMPLEMENT_SET_IMPL(name, argType, argName, setOp, returnStmt) \
+ Variant::name(argType argName) { \
+ if (isPrimitive()) { \
+ setOp; \
+ } else if (m_type == KindOfRef) { \
+ m_data.pref->var()->name(argName); \
+ returnStmt; \
+ } else { \
+ RefData* d = m_data.pref; \
+ DataType t = m_type; \
+ setOp; \
+ destructData(d, t); \
+ } \
+ returnStmt; \
+ }
+#define IMPLEMENT_VOID_SET(name, setOp) \
+ void IMPLEMENT_SET_IMPL(name, , , setOp, return)
+#define IMPLEMENT_SET(argType, setOp) \
+ CVarRef IMPLEMENT_SET_IMPL(set, argType, v, setOp, return *this)
+
+IMPLEMENT_VOID_SET(setNull, m_type = KindOfNull)
+HOT_FUNC IMPLEMENT_SET(bool, m_type = KindOfBoolean; m_data.num = v)
+IMPLEMENT_SET(int, m_type = KindOfInt64; m_data.num = v)
+HOT_FUNC IMPLEMENT_SET(int64, m_type = KindOfInt64; m_data.num = v)
+IMPLEMENT_SET(double, m_type = KindOfDouble; m_data.dbl = v)
+IMPLEMENT_SET(litstr,
+ m_type = KindOfString;
+ m_data.pstr = NEW(StringData)(v);
+ m_data.pstr->incRefCount())
+IMPLEMENT_SET(const StaticString&,
+ StringData* s = v.get();
+ ASSERT(s);
+ m_type = KindOfStaticString;
+ m_data.pstr = s)
+
+#undef IMPLEMENT_SET_IMPL
+#undef IMPLEMENT_VOID_SET
+#undef IMPLEMENT_SET
+
+#define IMPLEMENT_PTR_SET(ptr, member, dtype) \
+ CVarRef Variant::set(ptr *v) { \
+ Variant *self = m_type == KindOfRef ? m_data.pref->var() : this; \
+ if (UNLIKELY(!v)) { \
+ self->setNull(); \
+ } else { \
+ v->incRefCount(); \
+ RefData* d = self->m_data.pref; \
+ DataType t = self->m_type; \
+ self->m_type = dtype; \
+ self->m_data.member = v; \
+ if (IS_REFCOUNTED_TYPE(t)) destructData(d, t); \
+ } \
+ return *this; \
}
- StringData *s = v.get();
- ASSERT(s);
- m_type = KindOfStaticString;
- m_data.pstr = s;
- return *this;
-}
-HOT_FUNC
-CVarRef Variant::set(ArrayData *v) {
- Variant *self = m_type == KindOfRef ? m_data.pref->var() : this;
- if (UNLIKELY(!v)) {
- self->setNull();
- } else {
- v->incRefCount();
- if (IS_REFCOUNTED_TYPE(self->m_type)) self->destruct();
- self->m_type = KindOfArray;
- self->m_data.parr = v;
- }
- return *this;
-}
+HOT_FUNC IMPLEMENT_PTR_SET(StringData, pstr,
+ v->isStatic() ? KindOfStaticString : KindOfString);
+HOT_FUNC IMPLEMENT_PTR_SET(ArrayData, parr, KindOfArray)
+HOT_FUNC IMPLEMENT_PTR_SET(ObjectData, pobj, KindOfObject)
-HOT_FUNC
-CVarRef Variant::set(ObjectData *v) {
- Variant *self = m_type == KindOfRef ? m_data.pref->var() : this;
- if (UNLIKELY(!v)) {
- self->setNull();
- } else {
- v->incRefCount();
- if (IS_REFCOUNTED_TYPE(self->m_type)) self->destruct();
- self->m_type = KindOfObject;
- self->m_data.pobj = v;
- }
- return *this;
-}
+#undef IMPLEMENT_PTR_SET
void Variant::init(ObjectData *v) {
if (v) {
View
15 src/runtime/base/type_variant.h
@@ -123,6 +123,7 @@ class Variant : VariantBase {
Variant(NoInit) {}
void destruct();
+ static void destructData(RefData* num, DataType t);
// D462768 showed no gain from inlining, even just with INLINE_VARIANT_HELPER.
~Variant();
@@ -193,6 +194,7 @@ class Variant : VariantBase {
private:
inline ALWAYS_INLINE void destructImpl();
+ inline ALWAYS_INLINE static void destructDataImpl(RefData* d, DataType t);
friend class VarNR;
// This helper is only used to construct VarNR
static const int NR_FLAG = 1 << 29;
@@ -206,8 +208,10 @@ class Variant : VariantBase {
* Break bindings and set to null.
*/
void unset() {
- if (IS_REFCOUNTED_TYPE(m_type)) destruct();
+ RefData* d = m_data.pref;
+ DataType t = m_type;
m_type = KindOfUninit;
+ if (IS_REFCOUNTED_TYPE(t)) destructData(d, t);
}
/**
@@ -1308,9 +1312,12 @@ class Variant : VariantBase {
PromoteToRef(v);
RefData* r = v.m_data.pref;
r->incRefCount(); // in case destruct() triggers deletion of v
- if (IS_REFCOUNTED_TYPE(m_type)) destruct();
+
+ RefData* d = m_data.pref;
+ DataType t = m_type;
m_type = KindOfRef;
m_data.pref = r;
+ if (IS_REFCOUNTED_TYPE(t)) destructData(d, t);
}
inline ALWAYS_INLINE void constructRefHelper(CVarRef v) {
@@ -1345,10 +1352,12 @@ class Variant : VariantBase {
rhs.m_data.pstr->incRefCount();
}
- if (destroy) destruct();
+ RefData* d = m_data.pref;
+ DataType t = m_type;
m_type = rhs.m_type;
if (m_type == KindOfUninit) m_type = KindOfNull; // drop uninit
m_data.num = rhs.m_data.num;
+ if (destroy) destructData(d, t);
}
inline ALWAYS_INLINE
View
6 src/runtime/base/types.h
@@ -397,21 +397,21 @@ inline void check_recursion(ThreadInfo *&info) {
}
// implemented in runtime/base/builtin_functions.cpp
-extern void pause_and_exit() ATTRIBUTE_COLD ATTRIBUTE_NORETURN;
+extern void pause_forever() ATTRIBUTE_COLD ATTRIBUTE_NORETURN;
extern void check_request_surprise(ThreadInfo *info) ATTRIBUTE_COLD;
extern bool SegFaulting;
inline void check_request_timeout(ThreadInfo *info) {
const_assert(!hhvm);
- if (SegFaulting) pause_and_exit();
+ if (SegFaulting) pause_forever();
info->m_mm->refreshStats();
if (info->m_reqInjectionData.conditionFlags) check_request_surprise(info);
}
inline void check_request_timeout_nomemcheck(ThreadInfo *info) {
const_assert(!hhvm);
- if (SegFaulting) pause_and_exit();
+ if (SegFaulting) pause_forever();
if (info->m_reqInjectionData.conditionFlags) check_request_surprise(info);
}
View
208 src/runtime/ext/ext_collection.cpp
@@ -984,10 +984,13 @@ void c_Map::throwOOB() {
throw e;
}
-bool hit_string_key(const c_Map::Bucket* p, const char* k,
- int len, int32 hash) ALWAYS_INLINE;
-bool hit_string_key(const c_Map::Bucket* p, const char* k,
- int len, int32 hash) {
+#define STRING_HASH(x) (int32_t(x) | 0x80000000)
+
+bool hitStringKey(const c_Map::Bucket* p, const char* k,
+ int len, int32_t hash) ALWAYS_INLINE;
+bool hitStringKey(const c_Map::Bucket* p, const char* k,
+ int len, int32_t hash) {
+ ASSERT(p->validValue());
if (p->hasIntKey()) return false;
const char* data = p->skey->data();
return data == k || (p->hash() == hash &&
@@ -995,111 +998,103 @@ bool hit_string_key(const c_Map::Bucket* p, const char* k,
memcmp(data, k, len) == 0);
}
-c_Map::Bucket* c_Map::find(int64 h) const {
- Bucket* p = fetchBucket(h & m_nLastSlot);
- if (LIKELY(p->validValue() && p->hasIntKey() && p->ikey == h)) {
- return p;
- }
- if (LIKELY(p->empty())) {
- return NULL;
- }
- size_t probeIndex = h;
- for (size_t i = 1;; ++i) {
- ASSERT(i <= m_nLastSlot);
- probeIndex = (probeIndex + i) & m_nLastSlot;
- ASSERT(((size_t(h)+((i + i*i) >> 1)) & m_nLastSlot) == probeIndex);
- p = fetchBucket(probeIndex);
- if (p->validValue() && p->hasIntKey() && p->ikey == h) {
- return p;
- }
- if (p->empty()) {
- return NULL;
- }
- }
+bool hitIntKey(const c_Map::Bucket* p, int64 ki) ALWAYS_INLINE;
+bool hitIntKey(const c_Map::Bucket* p, int64 ki) {
+ ASSERT(p->validValue());
+ return p->ikey == ki && p->hasIntKey();
}
-c_Map::Bucket* c_Map::find(const char* k, int len, int64 prehash) const {
- int32 hash = c_Map::Bucket::encodeHash(prehash);
- Bucket* p = fetchBucket(prehash & m_nLastSlot);
- if (LIKELY(p->validValue() && hit_string_key(p, k, len, hash))) {
- return p;
- }
- if (p->empty()) {
- return NULL;
- }
- size_t probeIndex = prehash;
- for (size_t i = 1;; ++i) {
- ASSERT(i <= m_nLastSlot);
- probeIndex = (probeIndex + i) & m_nLastSlot;
- ASSERT(((size_t(prehash)+((i + i*i) >> 1)) & m_nLastSlot) == probeIndex);
- p = fetchBucket(probeIndex);
- if (LIKELY(p->validValue() && hit_string_key(p, k, len, hash))) {
- return p;
- }
- if (p->empty()) {
- return NULL;
- }
+#define FIND_BODY(h0, hit) \
+ size_t tableMask = m_nLastSlot; \
+ size_t probeIndex = size_t(h0) & tableMask; \
+ Bucket* p = fetchBucket(probeIndex); \
+ if (LIKELY(p->validValue() && (hit))) { \
+ return p; \
+ } \
+ if (LIKELY(p->empty())) { \
+ return NULL; \
+ } \
+ for (size_t i = 1;; ++i) { \
+ ASSERT(i <= tableMask); \
+ probeIndex = (probeIndex + i) & tableMask; \
+ ASSERT(((size_t(h0)+((i + i*i) >> 1)) & tableMask) == probeIndex); \
+ p = fetchBucket(probeIndex); \
+ if (p->validValue() && (hit)) { \
+ return p; \
+ } \
+ if (p->empty()) { \
+ return NULL; \
+ } \
+ }
+
+#define FIND_FOR_INSERT_BODY(h0, hit) \
+ size_t tableMask = m_nLastSlot; \
+ size_t probeIndex = size_t(h0) & tableMask; \
+ Bucket* p = fetchBucket(h0 & tableMask); \
+ if (LIKELY((p->validValue() && (hit)) || \
+ p->empty())) { \
+ return p; \
+ } \
+ Bucket* ts = NULL; \
+ for (size_t i = 1;; ++i) { \
+ if (UNLIKELY(p->tombstone() && !ts)) { \
+ ts = p; \
+ } \
+ ASSERT(i <= tableMask); \
+ probeIndex = (probeIndex + i) & tableMask; \
+ ASSERT(((size_t(h0)+((i + i*i) >> 1)) & tableMask) == probeIndex); \
+ p = fetchBucket(probeIndex); \
+ if (LIKELY(p->validValue() && (hit))) { \
+ return p; \
+ } \
+ if (LIKELY(p->empty())) { \
+ if (LIKELY(!ts)) { \
+ return p; \
+ } \
+ return ts; \
+ } \
}
+
+c_Map::Bucket* c_Map::find(int64 h) const {
+ FIND_BODY(h, hitIntKey(p, h));
+}
+
+c_Map::Bucket* c_Map::find(const char* k, int len, strhash_t prehash) const {
+ FIND_BODY(prehash, hitStringKey(p, k, len, STRING_HASH(prehash)));
}
c_Map::Bucket* c_Map::findForInsert(int64 h) const {
- Bucket* p = fetchBucket(h & m_nLastSlot);
- if (LIKELY((p->validValue() && p->hasIntKey() && p->ikey == h) ||
- p->empty())) {
- return p;
- }
- Bucket* ts = NULL;
- size_t probeIndex = h;
- for (size_t i = 1;; ++i) {
- if (UNLIKELY(p->tombstone() && !ts)) {
- ts = p;
- }
- ASSERT(i <= m_nLastSlot);
- probeIndex = (probeIndex + i) & m_nLastSlot;
- ASSERT(((size_t(h)+((i + i*i) >> 1)) & m_nLastSlot) == probeIndex);
- p = fetchBucket(probeIndex);
- if (LIKELY(p->validValue() && p->hasIntKey() && p->ikey == h)) {
- return p;
- }
- if (LIKELY(p->empty())) {
- if (LIKELY(!ts)) {
- return p;
- }
- return ts;
- }
- }
+ FIND_FOR_INSERT_BODY(h, hitIntKey(p, h));
}
c_Map::Bucket* c_Map::findForInsert(const char* k, int len,
- int64 prehash) const {
- int32 hash = c_Map::Bucket::encodeHash(prehash);
- Bucket* p = fetchBucket(prehash & m_nLastSlot);
- if (LIKELY((p->validValue() && hit_string_key(p, k, len, hash)) ||
- p->empty())) {
+ strhash_t prehash) const {
+ FIND_FOR_INSERT_BODY(prehash, hitStringKey(p, k, len, STRING_HASH(prehash)));
+}
+
+inline ALWAYS_INLINE
+c_Map::Bucket* c_Map::findForNewInsert(size_t h0) const {
+ size_t tableMask = m_nLastSlot;
+ size_t probeIndex = h0 & tableMask;
+ Bucket* p = fetchBucket(probeIndex);
+ if (LIKELY(p->empty())) {
return p;
}
- Bucket* ts = NULL;
- size_t probeIndex = prehash;
for (size_t i = 1;; ++i) {
- if (UNLIKELY(p->tombstone() && !ts)) {
- ts = p;
- }
- ASSERT(i <= m_nLastSlot);
- probeIndex = (probeIndex + i) & m_nLastSlot;
- ASSERT(((size_t(prehash)+((i + i*i) >> 1)) & m_nLastSlot) == probeIndex);
+ ASSERT(i <= tableMask);
+ probeIndex = (probeIndex + i) & tableMask;
+ ASSERT(((size_t(h0)+((i + i*i) >> 1)) & tableMask) == probeIndex);
p = fetchBucket(probeIndex);
- if (LIKELY(p->validValue() && hit_string_key(p, k, len, hash))) {
- return p;
- }
if (LIKELY(p->empty())) {
- if (LIKELY(!ts)) {
- return p;
- }
- return ts;
+ return p;
}
}
}
+#undef STRING_HASH
+#undef FIND_BODY
+#undef FIND_FOR_INSERT_BODY
+
bool c_Map::update(int64 h, TypedValue* data) {
ASSERT(data->m_type != KindOfRef);
Bucket* p = findForInsert(h);
@@ -1114,7 +1109,7 @@ bool c_Map::update(int64 h, TypedValue* data) {
++m_versionNumber;
++m_size;
if (!p->tombstone()) {
- if (++m_load >= computeMaxLoad()) {
+ if (UNLIKELY(++m_load >= computeMaxLoad())) {
resize();
p = findForInsert(h);
ASSERT(p);
@@ -1128,7 +1123,7 @@ bool c_Map::update(int64 h, TypedValue* data) {
}
bool c_Map::update(StringData *key, TypedValue* data) {
- int64 h = key->hash();
+ strhash_t h = key->hash();
Bucket* p = findForInsert(key->data(), key->size(), h);
ASSERT(p);
if (p->validValue()) {
@@ -1141,7 +1136,7 @@ bool c_Map::update(StringData *key, TypedValue* data) {
++m_versionNumber;
++m_size;
if (!p->tombstone()) {
- if (++m_load >= computeMaxLoad()) {
+ if (UNLIKELY(++m_load >= computeMaxLoad())) {
resize();
p = findForInsert(key->data(), key->size(), h);
ASSERT(p);
@@ -1195,12 +1190,7 @@ void c_Map::reserve(int64 sz) {
for (uint i = 0; i < oldNumSlots; ++i) {
Bucket* p = &oldBuckets[i];
if (p->validValue()) {
- Bucket* np;
- if (p->hasIntKey()) {
- np = findForInsert((int64)p->ikey);
- } else {
- np = findForInsert(p->skey->data(), p->skey->size(), p->skey->hash());
- }
+ Bucket* np = findForNewInsert(p->hasIntKey() ? p->ikey : p->hash());
memcpy(np, p, sizeof(Bucket));
}
}
@@ -1810,10 +1800,10 @@ Variant c_StableMap::ti_fromiterable(const char* cls, CVarRef it) {
return ret;
}
-bool lm_hit_string_key(const c_StableMap::Bucket* p,
+bool sm_hit_string_key(const c_StableMap::Bucket* p,
const char* k, int len, int32 hash)
ALWAYS_INLINE;
-bool lm_hit_string_key(const c_StableMap::Bucket* p,
+bool sm_hit_string_key(const c_StableMap::Bucket* p,
const char* k, int len, int32 hash) {
if (p->hasIntKey()) return false;
const char* data = p->skey->data();
@@ -1832,10 +1822,10 @@ c_StableMap::Bucket* c_StableMap::find(int64 h) const {
}
c_StableMap::Bucket* c_StableMap::find(const char* k, int len,
- int64 prehash) const {
- int32 hash = c_StableMap::Bucket::encodeHash(prehash);
+ strhash_t prehash) const {
+ int32_t hash = c_StableMap::Bucket::encodeHash(prehash);
for (Bucket* p = m_arBuckets[prehash & m_nTableMask]; p; p = p->pNext) {
- if (lm_hit_string_key(p, k, len, hash)) return p;
+ if (sm_hit_string_key(p, k, len, hash)) return p;
}
return NULL;
}
@@ -1854,12 +1844,12 @@ c_StableMap::Bucket** c_StableMap::findForErase(int64 h) const {
}
c_StableMap::Bucket** c_StableMap::findForErase(const char* k, int len,
- int64 prehash) const {
+ strhash_t prehash) const {
Bucket** ret = &(m_arBuckets[prehash & m_nTableMask]);
Bucket* p = *ret;
- int32 hash = c_StableMap::Bucket::encodeHash(prehash);
+ int32_t hash = c_StableMap::Bucket::encodeHash(prehash);
while (p) {
- if (lm_hit_string_key(p, k, len, hash)) return ret;
+ if (sm_hit_string_key(p, k, len, hash)) return ret;
ret = &(p->pNext);
p = *ret;
}
@@ -1886,7 +1876,7 @@ bool c_StableMap::update(int64 h, CVarRef data) {
}
bool c_StableMap::update(StringData *key, CVarRef data) {
- int64 h = key->hash();
+ strhash_t h = key->hash();
Bucket* p = find(key->data(), key->size(), h);
if (p) {
p->data.assignValHelper(data);
View
46 src/runtime/ext/ext_collection.h
@@ -396,17 +396,12 @@ class c_Map : public ExtObjectDataFlags<ObjectData::MapAttrInit|
int64 ikey;
StringData *skey;
};
- // set the top bit for string hashes to make sure the hash
- // value is never zero. hash value 0 corresponds to integer key.
- static inline int32 encodeHash(int32 h) {
- return (h | 0x80000000);
- }
inline bool hasStrKey() const { return data._count != 0; }
inline bool hasIntKey() const { return data._count == 0; }
- inline void setStrKey(StringData* k, int64 h) {
+ inline void setStrKey(StringData* k, strhash_t h) {
skey = k;
skey->incRefCount();
- data._count = encodeHash(h);
+ data._count = int32_t(h) | 0x80000000;
}
inline void setIntKey(int64 k) {
ikey = k;
@@ -415,16 +410,16 @@ class c_Map : public ExtObjectDataFlags<ObjectData::MapAttrInit|
inline int64 hashKey() const {
return data._count == 0 ? ikey : data._count;
}
- inline int32 hash() const {
+ inline int32_t hash() const {
return data._count;
}
- bool validValue() {
+ bool validValue() const {
return (intptr_t(data.m_type) > 0);
}
- bool empty() {
+ bool empty() const {
return data.m_type == KindOfUninit;
}
- bool tombstone() {
+ bool tombstone() const {
return data.m_type == KindOfTombstone;
}
void dump();
@@ -494,9 +489,10 @@ class c_Map : public ExtObjectDataFlags<ObjectData::MapAttrInit|
}
Bucket* find(int64 h) const;
- Bucket* find(const char* k, int len, int64 prehash) const;
+ Bucket* find(const char* k, int len, strhash_t prehash) const;
Bucket* findForInsert(int64 h) const;
- Bucket* findForInsert(const char* k, int len, int64 prehash) const;
+ Bucket* findForInsert(const char* k, int len, strhash_t prehash) const;
+ Bucket* findForNewInsert(size_t h0) const;
bool update(int64 h, TypedValue* data);
bool update(StringData* key, TypedValue* data);
@@ -708,22 +704,12 @@ class c_StableMap : public ExtObjectDataFlags<ObjectData::StableMapAttrInit|
data(d), ikey(0), pListNext(NULL), pListLast(NULL), pNext(NULL) {
data._count = 0;
}
+ ~Bucket();
// set the top bit for string hashes to make sure the hash
// value is never zero. hash value 0 corresponds to integer key.
- static inline int32 encodeHash(int32 h) {
- return (h | 0x80000000);
+ static inline int32_t encodeHash(strhash_t h) {
+ return int32_t(h) | 0x80000000;
}
- // These special constructors do not setup all the member fields.
- // They cannot be used along but must be with the following special
- // MapImpl constructor
- Bucket(StringData *k, CVarRef d) : data(d), skey(k) {
- ASSERT(k->isStatic());
- data._count = encodeHash(k->getPrecomputedHash());
- }
- Bucket(int64 k, CVarRef d) : data(d), ikey(k) {
- data._count = 0;
- }
- ~Bucket();
/* The key is either a string pointer or an int value, and the _count
* field in data is used to discriminate the key type. _count = 0 means
@@ -741,7 +727,7 @@ class c_StableMap : public ExtObjectDataFlags<ObjectData::StableMapAttrInit|
inline bool hasStrKey() const { return data._count != 0; }
inline bool hasIntKey() const { return data._count == 0; }
- inline void setStrKey(StringData* k, int64 h) {
+ inline void setStrKey(StringData* k, strhash_t h) {
skey = k;
skey->incRefCount();
data._count = encodeHash(h);
@@ -753,7 +739,7 @@ class c_StableMap : public ExtObjectDataFlags<ObjectData::StableMapAttrInit|
inline int64 hashKey() const {
return data._count == 0 ? ikey : data._count;
}
- inline int32 hash() const {
+ inline int32_t hash() const {
return data._count;
}
@@ -774,9 +760,9 @@ class c_StableMap : public ExtObjectDataFlags<ObjectData::StableMapAttrInit|
Bucket** m_arBuckets;
Bucket* find(int64 h) const;
- Bucket* find(const char* k, int len, int64 prehash) const;
+ Bucket* find(const char* k, int len, strhash_t prehash) const;
Bucket** findForErase(int64 h) const;
- Bucket** findForErase(const char* k, int len, int64 prehash) const;
+ Bucket** findForErase(const char* k, int len, strhash_t prehash) const;
bool update(int64 h, CVarRef data);
bool update(StringData* key, CVarRef data);
View
13 src/runtime/vm/bytecode.cpp
@@ -2022,6 +2022,15 @@ void VMExecutionContext::invokeFunc(TypedValue* retval,
checkStack(m_stack, f);
+ if (toMerge != NULL) {
+ ASSERT(toMerge->getMain() == f);
+ toMerge->merge();
+ if (toMerge->isMergeOnly()) {
+ *retval = *toMerge->getMainReturn();
+ return;
+ }
+ }
+
ActRec* ar = m_stack.allocA();
ar->m_soff = 0;
ar->m_savedRbp = 0;
@@ -2109,10 +2118,6 @@ void VMExecutionContext::invokeFunc(TypedValue* retval,
}
}
- if (toMerge != NULL) {
- toMerge->merge();
- }
-
if (m_fp) {
reenterVM(retval, ar, extraArgs, savedSP);
} else {
View
8 src/runtime/vm/translator/targetcache.cpp
@@ -488,6 +488,14 @@ GlobalCache::lookupCreate(Handle handle, StringData* name) {
}
TypedValue*
+GlobalCache::lookupCreateAddr(void* cacheAddr, StringData* name) {
+ GlobalCache* thiz = (GlobalCache*)cacheAddr;
+ TypedValue* retval = thiz->lookupImpl<false>(name, true /* allowCreate */);
+ ASSERT(retval->m_type != KindOfRef);
+ return retval;
+}
+
+TypedValue*
BoxedGlobalCache::lookup(Handle handle, StringData* name) {
BoxedGlobalCache* thiz = (BoxedGlobalCache*)
BoxedGlobalCache::cacheAtHandle(handle);
View
1  src/runtime/vm/translator/targetcache.h
@@ -401,6 +401,7 @@ class GlobalCache {
static TypedValue* lookup(CacheHandle handle, StringData* nm);
static TypedValue* lookupCreate(CacheHandle handle, StringData* nm);
+ static TypedValue* lookupCreateAddr(void* cacheAddr, StringData* nm);
};
class BoxedGlobalCache : public GlobalCache {
View
49 src/runtime/vm/translator/translator-x64.cpp
@@ -708,7 +708,7 @@ static void
emitDerefIfVariant(X64Assembler &a, PhysReg reg) {
if (RuntimeOption::EvalJitCmovVarDeref) {
a.cmp_imm32_disp_reg32(KindOfRef, TVOFF(m_type), reg);
- a.cload_reg64_disp_reg64(CC_Z, reg, 0, reg);
+ a.cload_reg64_disp_reg64(CC_Z, reg, TVOFF(m_data), reg);
} else {
IfVariant ifVar(a, reg);
emitDeref(a, reg, reg);
@@ -721,9 +721,10 @@ static void
emitStoreTypedValue(X64Assembler& a, DataType type, PhysReg val,
int disp, PhysReg dest, bool writeType = true) {
if (writeType) {
- a. store_imm32_disp_reg(type, disp + TVOFF(m_type), dest);
+ a. store_imm32_disp_reg(type, disp + TVOFF(m_type), dest);
}
if (!IS_NULL_TYPE(type)) {
+ ASSERT(val != reg::noreg);
a. store_reg64_disp_reg64(val, disp + TVOFF(m_data), dest);
}
}
@@ -764,13 +765,19 @@ emitStoreNull(X64Assembler& a, const Location& where) {
* Emit code that does the same thing as tvSet().
*
* The `oldType' and `oldData' registers are used for temporary
- * storage and unconditionally destroyed; `toPtr' may be destroyed;
+ * storage and unconditionally destroyed.
+ * `toPtr' will be destroyed iff the cell we're storing to is
+ * KindOfRef.
+ * The variant check will not be performed if toOffset is nonzero, so
+ * only pass a nonzero offset if you know the destination is not
+ * KindOfRef.
* `from' will not be modified.
*/
void TranslatorX64::emitTvSetRegSafe(const NormalizedInstruction& i,
PhysReg from,
DataType fromType,
PhysReg toPtr,
+ int toOffset,
PhysReg oldType,
PhysReg oldData,
bool incRefFrom) {
@@ -778,10 +785,12 @@ void TranslatorX64::emitTvSetRegSafe(const NormalizedInstruction& i,
ASSERT(!i.isSimple());
ASSERT(fromType != KindOfRef);
- emitDerefIfVariant(a, toPtr);
- a. load_reg64_disp_reg32(toPtr, TVOFF(m_type), oldType);
- a. load_reg64_disp_reg64(toPtr, TVOFF(m_data), oldData);
- emitStoreTypedValue(a, fromType, from, 0, toPtr);
+ if (toOffset == 0) {
+ emitDerefIfVariant(a, toPtr);
+ }
+ a. load_reg64_disp_reg32(toPtr, toOffset + TVOFF(m_type), oldType);
+ a. load_reg64_disp_reg64(toPtr, toOffset + TVOFF(m_data), oldData);
+ emitStoreTypedValue(a, fromType, from, toOffset, toPtr);
if (incRefFrom) {
emitIncRef(from, fromType);
}
@@ -792,10 +801,12 @@ void TranslatorX64::emitTvSet(const NormalizedInstruction& i,
PhysReg from,
DataType fromType,
PhysReg toPtr,
+ int toOffset,
bool incRefFrom) {
ScratchReg oldType(m_regMap);
ScratchReg oldData(m_regMap);
- emitTvSetRegSafe(i, from, fromType, toPtr, *oldType, *oldData, incRefFrom);
+ emitTvSetRegSafe(i, from, fromType, toPtr, toOffset,
+ *oldType, *oldData, incRefFrom);
}
// Logical register move: ensures the value in src will be in dest
@@ -9237,12 +9248,12 @@ void TranslatorX64::translatePackCont(const Tracelet& t,
a. store_reg32_disp_reg64(*rZero, srcOff + TVOFF(m_type), rVmFp);
}
- int valueOff = offsetof(c_GenericContinuation, m_value);
- emitDecRefGeneric(i, rCont, valueOff);
// We're moving our reference to the value from the stack to the
// continuation object, so we don't have to incRef or decRef
Location valLoc = i.inputs[valIdx]->location;
- spillTo(i.inputs[valIdx]->outerType(), getReg(valLoc), true, rCont, valueOff);
+ emitTvSet(i, getReg(valLoc), i.inputs[valIdx]->outerType(), rCont,
+ offsetof(c_GenericContinuation, m_value), false);
+
emitImmReg(a, i.imm[0].u_IVA, *rScratch);
a. store_reg64_disp_reg64(*rScratch,
offsetof(c_GenericContinuation, m_label),
@@ -9339,9 +9350,7 @@ void TranslatorX64::translateContNext(const Tracelet& t,
// m_received.setNull()
const Offset receivedOff = offsetof(c_GenericContinuation, m_received);
- emitDecRefGeneric(i, *rCont, receivedOff);
- emitStoreImm(a, KindOfUninit, *rCont, receivedOff + TVOFF(m_type),
- sz::dword, &m_regMap);
+ emitTvSet(i, reg::noreg, KindOfNull, *rCont, receivedOff, false);
emitContPreNext(i, rCont);
}
@@ -9375,11 +9384,9 @@ void TranslatorX64::translateContSendImpl(const NormalizedInstruction& i) {
// m_received = value
const Offset receivedOff = offsetof(c_GenericContinuation, m_received);
- emitDecRefGeneric(i, *rCont, receivedOff);
PhysReg valReg = getReg(i.inputs[valIdx]->location);
DataType valType = i.inputs[valIdx]->outerType();
- emitIncRef(valReg, valType);
- spillTo(valType, valReg, true, *rCont, receivedOff);
+ emitTvSet(i, valReg, valType, *rCont, receivedOff, true);
// m_should_throw = true (maybe)
if (raise) {
@@ -10288,9 +10295,9 @@ TranslatorX64::emitGetGlobal(const NormalizedInstruction& i, int nameIdx,
}
SKTRACE(1, i.source, "ch %d\n", ch);
EMIT_CALL(a, allowCreate ? GlobalCache::lookupCreate
- : GlobalCache::lookup,
- IMM(ch),
- IMM((uint64_t)maybeName));
+ : GlobalCache::lookup,
+ IMM(ch),
+ IMM((uint64_t)maybeName));
recordCall(i);
}
@@ -10618,7 +10625,7 @@ TranslatorX64::emitPropSet(const NormalizedInstruction& i,
DataType rhsType = rhs.rtt.valueType();
// Store rhs in the field
- emitTvSet(i, rhsReg, rhsType, fieldAddr, i.outStack || rhs.isLocal());
+ emitTvSet(i, rhsReg, rhsType, fieldAddr, 0, i.outStack || rhs.isLocal());
if (!base.isLocal()) {
const int kBaseIdx = 1;
View
4 src/runtime/vm/translator/translator-x64.h
@@ -180,10 +180,10 @@ class TranslatorX64 : public Translator, public SpillFill,
int off,
PhysReg tmpReg);
void emitTvSetRegSafe(const NormalizedInstruction&, PhysReg from,
- DataType fromType, PhysReg toPtr, PhysReg tmp1, PhysReg tmp2,
+ DataType fromType, PhysReg toPtr, int toOffset, PhysReg tmp1, PhysReg tmp2,
bool incRefFrom);
void emitTvSet(const NormalizedInstruction&, PhysReg from,
- DataType fromType, PhysReg toPtr, bool incRefFrom = true);
+ DataType fromType, PhysReg toPtr, int toOffset = 0, bool incRefFrom = true);
void emitPushAR(const NormalizedInstruction& i, const Func* func,
const int bytesPopped = 0, bool isCtor = false,
View
330 src/runtime/vm/unit.cpp
@@ -22,6 +22,7 @@
#include <boost/algorithm/string.hpp>
#include <util/lock.h>
+#include <util/util.h>
#include <runtime/ext/ext_variable.h>
#include <runtime/vm/bytecode.h>
#include <runtime/vm/repo.h>
@@ -41,6 +42,8 @@ namespace HPHP {
namespace VM {
///////////////////////////////////////////////////////////////////////////////
+using Util::getDataRef;
+
static const Trace::Module TRACEMOD = Trace::hhbc;
Mutex Unit::s_classesMutex;
@@ -258,9 +261,12 @@ Unit::Unit()
m_mergeables(NULL),
m_firstHoistableFunc(0),
m_firstHoistablePreClass(0),
- m_firstMergablePreClass(0),
+ m_firstMergeablePreClass(0),
m_mergeablesSize(0),
- m_repoId(-1), m_initialMergeState(UnitMergeStateUninit) {
+ m_cacheOffset(0),
+ m_repoId(-1),
+ m_mergeState(UnitMergeStateUnmerged),
+ m_cacheMask(0) {
TV_WRITE_UNINIT(&m_mainReturn);
m_mainReturn._count = 0; // flag for whether or not the unit is mergeable
}
@@ -295,7 +301,8 @@ Unit::~Unit() {
}
}
- if (!RuntimeOption::RepoAuthoritative && m_initialMergeState) {
+ if (!RuntimeOption::RepoAuthoritative &&
+ (m_mergeState & UnitMergeStateMerged)) {
Transl::unmergePreConsts(m_preConsts, this);
}
@@ -510,14 +517,21 @@ static SimpleMutex unitInitLock(false /* reentrant */, RankUnitInit);
void Unit::initialMerge() {
unitInitLock.assertOwnedBySelf();
- if (LIKELY(m_initialMergeState == UnitMergeStateUninit)) {
- m_initialMergeState = UnitMergeStateMerging;
+ if (LIKELY(m_mergeState == UnitMergeStateUnmerged)) {
+ int state = 0;
+ m_mergeState = UnitMergeStateMerging;
+ bool allFuncsUnique = RuntimeOption::RepoAuthoritative;
for (MutableFuncRange fr(nonMainFuncs()); !fr.empty();) {
- loadFunc(fr.popFront());
+ Func* f = fr.popFront();
+ if (allFuncsUnique) {
+ allFuncsUnique = (f->attrs() & AttrUnique);
+ }
+ loadFunc(f);
}
+ if (allFuncsUnique) state |= UnitMergeStateUniqueFuncs;
if (!RuntimeOption::RepoAuthoritative) {
Transl::mergePreConsts(m_preConsts);
- } else if (isMergeOnly()) {
+ } else {
/*
* The mergeables array begins with the hoistable Func*s,
* followed by the (potentially) hoistable Class*s.
@@ -536,122 +550,181 @@ void Unit::initialMerge() {
* the pointer will be followed by a TypedValue representing
* the value being defined/assigned.
*/
- for (int ix = m_firstMergablePreClass, end = m_mergeablesSize;
- ix < end; ++ix) {
- void *obj = mergeableObj(ix);
- InclOpFlags flags = InclOpDefault;
- UnitMergeKind k = UnitMergeKind(uintptr_t(obj) & 7);
- switch (k) {
- case UnitMergeKindDone: ASSERT(false);
- case UnitMergeKindClass: break;
- case UnitMergeKindReqMod:
- flags = InclOpDocRoot | InclOpLocal;
- goto inc;
- case UnitMergeKindReqSrc:
- flags = InclOpRelative | InclOpLocal;
- goto inc;
- case UnitMergeKindReqDoc:
- flags = InclOpDocRoot;
- goto inc;
- inc: {
+ bool allClassesUnique = true;
+ int ix = m_firstHoistablePreClass;
+ int end = m_firstMergeablePreClass;
+ while (ix < end) {
+ PreClass* pre = (PreClass*)mergeableObj(ix++);
+ if (allClassesUnique) {
+ allClassesUnique = pre->attrs() & AttrUnique;
+ }
+ }
+ if (isMergeOnly()) {
+ ix = m_firstMergeablePreClass;
+ end = m_mergeablesSize;
+ while (ix < end) {
+ void *obj = mergeableObj(ix);
+ InclOpFlags flags = InclOpDefault;
+ UnitMergeKind k = UnitMergeKind(uintptr_t(obj) & 7);
+ switch (k) {
+ case UnitMergeKindUniqueDefinedClass:
+ case UnitMergeKindDone:
+ not_reached();
+ case UnitMergeKindClass:
+ if (allClassesUnique) {
+ allClassesUnique = ((PreClass*)obj)->attrs() & AttrUnique;
+ }
+ break;
+ case UnitMergeKindReqMod:
+ flags = InclOpDocRoot | InclOpLocal;
+ goto inc;
+ case UnitMergeKindReqSrc:
+ flags = InclOpRelative | InclOpLocal;
+ goto inc;
+ case UnitMergeKindReqDoc:
+ flags = InclOpDocRoot;
+ goto inc;
+ inc: {
+ StringData* s = (StringData*)((char*)obj - (int)k);
+ HPHP::Eval::PhpFile* efile =
+ g_vmContext->lookupIncludeRoot(s, flags, NULL, this);
+ ASSERT(efile);
+ Unit* unit = efile->unit();
+ unit->initialMerge();
+ mergeableObj(ix) = (void*)((char*)unit + (int)k);
+ }
+ break;
+ case UnitMergeKindDefine: {
StringData* s = (StringData*)((char*)obj - (int)k);
- HPHP::Eval::PhpFile* efile =
- g_vmContext->lookupIncludeRoot(s, flags, NULL, this);
- ASSERT(efile);
- Unit* unit = efile->unit();
- unit->initialMerge();
- mergeableObj(ix) = (void*)((char*)unit + (int)k);
+ TypedValue* v = (TypedValue*)mergeableData(ix + 1);
+ ix += sizeof(TypedValue) / sizeof(void*);
+ v->_count = TargetCache::allocConstant(s);
+ break;
+ }
+ case UnitMergeKindGlobal: {
+ StringData* s = (StringData*)((char*)obj - (int)k);
+ TypedValue* v = (TypedValue*)mergeableData(ix + 1);
+ ix += sizeof(TypedValue) / sizeof(void*);
+ v->_count = TargetCache::GlobalCache::alloc(s);
+ break;
}
- break;
- case UnitMergeKindDefine: {
- StringData* s = (StringData*)((char*)obj - (int)k);
- TypedValue* v = (TypedValue*)mergeableData(ix + 1);
- ix += sizeof(TypedValue) / sizeof(void*);
- v->_count = TargetCache::allocConstant(s);
- break;
- }
- case UnitMergeKindGlobal: {
- StringData* s = (StringData*)((char*)obj - (int)k);
- TypedValue* v = (TypedValue*)mergeableData(ix + 1);
- ix += sizeof(TypedValue) / sizeof(void*);
- v->_count = TargetCache::GlobalCache::alloc(s);
- break;
}
+ ix++;
}
}
+ if (allClassesUnique) state |= UnitMergeStateUniqueClasses;
}
- m_initialMergeState = UnitMergeStateMerged;
+ m_mergeState = UnitMergeStateMerged | state;
}
}
-static void mergeCns(TargetCache::CacheHandle ch, TypedValue *value,
+static void mergeCns(TypedValue& tv, TypedValue *value,
StringData *name) {
- using namespace TargetCache;
- TypedValue *tv = (TypedValue*)handleToPtr(ch);
- if (LIKELY(tv->m_type == KindOfUninit &&
+ if (LIKELY(tv.m_type == KindOfUninit &&
g_vmContext->m_constants.nvInsert(name, value))) {
- tvDup(value, tv);
+ tvDup(value, &tv);
return;
}
raise_warning(Strings::CONSTANT_ALREADY_DEFINED, name->data());
}
-static void setGlobal(TargetCache::CacheHandle ch, TypedValue *value,
+static void setGlobal(void* cacheAddr, TypedValue *value,
StringData *name) {
- using namespace TargetCache;
- TypedValue* g = GlobalCache::lookupCreate(ch, name);
- tvSet(value, g);
+ tvSet(value, TargetCache::GlobalCache::lookupCreateAddr(cacheAddr, name));
}
void Unit::merge() {
- if (UNLIKELY(m_initialMergeState != UnitMergeStateMerged)) {
+ if (UNLIKELY(!(m_mergeState & UnitMergeStateMerged))) {
SimpleLock lock(unitInitLock);
- ASSERT(m_initialMergeState != UnitMergeStateMerging);
initialMerge();
- ASSERT(m_initialMergeState == UnitMergeStateMerged);
}
+ if (UNLIKELY(isDebuggerAttached())) {
+ mergeImpl<true>(TargetCache::handleToPtr(0));
+ } else {
+ mergeImpl<false>(TargetCache::handleToPtr(0));
+ }
+}
+
+template <bool debugger>
+void Unit::mergeImpl(void* tcbase) {
+ ASSERT(m_mergeState & UnitMergeStateMerged);
+
Func** it = funcHoistableBegin();
Func** fend = funcEnd();
if (it != fend) {
- bool debugger = isDebuggerAttached();
- do {
- Func* func = *it;
- ASSERT(func->top());
- setCachedFunc(func, debugger);
- } while (++it != fend);
+ if (LIKELY((m_mergeState & UnitMergeStateUniqueFuncs) != 0)) {
+ do {
+ Func* func = *it;
+ ASSERT(func->top());
+ getDataRef<Func*>(tcbase, func->getCachedOffset()) = func;
+ if (debugger) phpDefFuncHook(func);
+ } while (++it != fend);
+ } else {
+ do {
+ Func* func = *it;
+ ASSERT(func->top());
+ setCachedFunc(func, debugger);
+ } while (++it != fend);
+ }
}
bool redoHoistable = false;
int ix = m_firstHoistablePreClass;
- int end = m_firstMergablePreClass;
+ int end = m_firstMergeablePreClass;
// iterate over all the potentially hoistable classes
// with no fatals on failure
- while (ix < end) {
- PreClass* pre = (PreClass*)mergeableObj(ix++);
- if (!defClass(pre, false)) redoHoistable = true;
- }
- if (UNLIKELY(redoHoistable)) {
- // if this unit isnt mergeOnly, we're done
- if (!isMergeOnly()) return;
- // as a special case, if all the classes are potentially
- // hoistable, we dont list them twice, but instead
- // iterate over them again
- // At first glance, it may seem like we could leave
- // the maybe-hoistable classes out of the second list
- // and then always reset ix to 0; but that gets this
- // case wrong if there's an autoloader for C, and C
- // extends B:
- //
- // class A {}
- // class B implements I {}
- // class D extends C {}
- //
- // because now A and D go on the maybe-hoistable list
- // B goes on the never hoistable list, and we
- // fatal trying to instantiate D before B
- if (end == (int)m_mergeablesSize) ix = m_firstHoistablePreClass;
+ if (ix < end) {
+ if (LIKELY((m_mergeState & UnitMergeStateUniqueDefinedClasses) != 0)) {
+ do {
+ PreClass* pre = (PreClass*)mergeableObj(ix++);
+ Class* cls = *pre->namedEntity()->clsList();
+ ASSERT(cls && !cls->m_nextClass);
+ ASSERT(cls->preClass() == pre);
+ if (Class* parent = cls->parent()) {
+ if (UNLIKELY(!getDataRef<Class*>(tcbase, parent->m_cachedOffset))) {
+ redoHoistable = true;
+ continue;
+ }
+ }
+ getDataRef<Class*>(tcbase, cls->m_cachedOffset) = cls;
+ if (debugger) phpDefClassHook(cls);
+ } while (ix < end);
+ } else {
+ do {
+ PreClass* pre = (PreClass*)mergeableObj(ix++);
+ if (UNLIKELY(!defClass(pre, false))) redoHoistable = true;
+ } while (ix < end);
+ }
+ if (UNLIKELY(redoHoistable)) {
+ // if this unit isn't mergeOnly, we're done
+ if (!isMergeOnly()) return;
+ // as a special case, if all the classes are potentially
+ // hoistable, we dont list them twice, but instead
+ // iterate over them again
+ // At first glance, it may seem like we could leave
+ // the maybe-hoistable classes out of the second list
+ // and then always reset ix to 0; but that gets this
+ // case wrong if there's an autoloader for C, and C
+ // extends B:
+ //
+ // class A {}
+ // class B implements I {}
+ // class D extends C {}
+ //
+ // because now A and D go on the maybe-hoistable list
+ // B goes on the never hoistable list, and we
+ // fatal trying to instantiate D before B
+ if (end == (int)m_mergeablesSize) {
+ ix = m_firstHoistablePreClass;
+ do {
+ PreClass* pre = (PreClass*)mergeableObj(ix++);
+ defClass(pre, true);
+ } while (ix < end);
+ return;
+ }
+ }
}
// iterate over all but the guaranteed hoistable classes
@@ -668,11 +741,27 @@ void Unit::merge() {
} while (!k);
continue;
+ case UnitMergeKindUniqueDefinedClass:
+ do {
+ Class* other = NULL;
+ Class* cls = (Class*)((char*)obj - (int)k);
+ Class::Avail avail = cls->avail(other, true);
+ if (UNLIKELY(avail == Class::AvailFail)) {
+ raise_error("unknown class %s", other->name()->data());
+ }
+ ASSERT(avail == Class::AvailTrue);
+ getDataRef<Class*>(tcbase, cls->m_cachedOffset) = cls;
+ if (debugger) phpDefClassHook(cls);
+ obj = mergeableObj(++ix);
+ k = UnitMergeKind(uintptr_t(obj) & 7);
+ } while (k == UnitMergeKindUniqueDefinedClass);
+ continue;
+
case UnitMergeKindDefine:
do {
StringData* name = (StringData*)((char*)obj - (int)k);
TypedValue *v = (TypedValue*)mergeableData(ix + 1);
- mergeCns(v->_count, v, name);
+ mergeCns(getDataRef<TypedValue>(tcbase, v->_count), v, name);
ix += 1 + sizeof(TypedValue) / sizeof(void*);
obj = mergeableObj(ix);
k = UnitMergeKind(uintptr_t(obj) & 7);
@@ -683,7 +772,7 @@ void Unit::merge() {
do {
StringData* name = (StringData*)((char*)obj - (int)k);
TypedValue *v = (TypedValue*)mergeableData(ix + 1);
- setGlobal(v->_count, v, name);
+ setGlobal(&getDataRef<char>(tcbase, v->_count), v, name);
ix += 1 + sizeof(TypedValue) / sizeof(void*);
obj = mergeableObj(ix);
k = UnitMergeKind(uintptr_t(obj) & 7);
@@ -695,13 +784,16 @@ void Unit::merge() {
case UnitMergeKindReqDoc:
do {
Unit *unit = (Unit*)((char*)obj - (int)k);
- if (!TargetCache::testAndSetBit(unit->m_cacheId)) {
- unit->merge();
+ uchar& unitLoadedFlags =
+ getDataRef<uchar>(tcbase, unit->m_cacheOffset);
+ if (!(unitLoadedFlags & unit->m_cacheMask)) {
+ unitLoadedFlags |= unit->m_cacheMask;
+ unit->mergeImpl<debugger>(tcbase);
if (UNLIKELY(!unit->isMergeOnly())) {
Stats::inc(Stats::PseudoMain_Reentered);
TypedValue ret;
g_vmContext->invokeFunc(&ret, unit->getMain(), Array(),
- NULL, NULL, NULL, NULL, unit);
+ NULL, NULL, NULL, NULL, NULL);
tvRefcountedDecRef(&ret);
} else {
Stats::inc(Stats::PseudoMain_SkipDeep);
@@ -715,6 +807,46 @@ void Unit::merge() {
continue;
case UnitMergeKindDone:
ASSERT((unsigned)ix == m_mergeablesSize);
+ if (UNLIKELY((m_mergeState & (UnitMergeStateUniqueClasses|
+ UnitMergeStateUniqueDefinedClasses)) ==
+ UnitMergeStateUniqueClasses)) {
+ /*
+ * All the classes are known to be unique, and we just got
+ * here, so all were successfully defined. We can now go
+ * back and convert all UnitMergeKindClass entries to
+ * UnitMergeKindUniqueDefinedClass.
+ *
+ * This is a pure optimization: whether readers see the
+ * old value or the new does not affect correctness.
+ * Also, it's idempotent - even if multiple threads do
+ * this update simultaneously, they all make exactly the
+ * same change.
+ */
+ m_mergeState |= UnitMergeStateUniqueDefinedClasses;
+ end = ix;
+ ix = m_firstMergeablePreClass;
+ do {
+ obj = mergeableObj(ix);
+ k = UnitMergeKind(uintptr_t(obj) & 7);
+ switch (k) {
+ case UnitMergeKindClass: {
+ PreClass* pre = (PreClass*)obj;
+ Class* cls = *pre->namedEntity()->clsList();
+ ASSERT(cls && !cls->m_nextClass);
+ ASSERT(cls->preClass() == pre);
+ mergeableObj(ix) =
+ (char*)cls + (int)UnitMergeKindUniqueDefinedClass;
+ break;
+ }
+ case UnitMergeKindDefine:
+ case UnitMergeKindGlobal:
+ ix += sizeof(TypedValue) / sizeof(void*);
+ break;
+ default:
+ break;
+ }
+ } while (++ix < end);
+ }
return;
}
// Normal cases should continue, KindDone returns
@@ -1788,7 +1920,9 @@ void UnitEmitter::commit(UnitOrigin unitOrigin) {
}
for (int i = 0, n = m_mergeableStmts.size(); i < n; i++) {
switch (m_mergeableStmts[i].first) {
- case UnitMergeKindDone: ASSERT(false);
+ case UnitMergeKindDone:
+ case UnitMergeKindUniqueDefinedClass:
+ not_reached();
case UnitMergeKindClass: break;
case UnitMergeKindReqMod:
case UnitMergeKindReqSrc:
@@ -1910,7 +2044,7 @@ Unit* UnitEmitter::create() {
it != m_hoistablePceIdVec.end(); ++it) {
u->mergeableObj(ix++) = u->m_preClasses[*it].get();
}
- u->m_firstMergablePreClass = ix;
+ u->m_firstMergeablePreClass = ix;
if (u->m_mainReturn._count && !m_allClassesHoistable) {
for (MergeableStmtVec::const_iterator it = m_mergeableStmts.begin();
it != m_mergeableStmts.end(); ++it) {
@@ -1937,7 +2071,9 @@ Unit* UnitEmitter::create() {
ASSERT(sizeof(TypedValue) % sizeof(void*) == 0);
break;
}
- case UnitMergeKindDone: ASSERT(false);
+ case UnitMergeKindDone:
+ case UnitMergeKindUniqueDefinedClass:
+ not_reached();
}
}
}
View
32 src/runtime/vm/unit.h
@@ -40,22 +40,27 @@ enum UnitOrigin {
enum UnitMergeKind {
UnitMergeKindClass = 0,
- UnitMergeKindDefine = 1,
- UnitMergeKindGlobal = 2,
- UnitMergeKindDone = 3,
+ UnitMergeKindUniqueDefinedClass = 1,
+ UnitMergeKindDefine = 2,
+ UnitMergeKindGlobal = 3,
UnitMergeKindReqMod = 4, // used by isMergeKindReq
UnitMergeKindReqSrc = 5, // "
UnitMergeKindReqDoc = 6, // "
+ UnitMergeKindDone = 7,
};
enum UnitMergeState {
- UnitMergeStateUninit = 0,
+ UnitMergeStateUnmerged = 0,
UnitMergeStateMerging = 1,
- UnitMergeStateMerged = 2
+ UnitMergeStateMerged = 2,
+ UnitMergeStateUniqueFuncs = 4,
+ UnitMergeStateUniqueClasses = 8,
+ UnitMergeStateUniqueDefinedClasses = 16
};
inline bool ALWAYS_INLINE isMergeKindReq(UnitMergeKind k) {
- return k & 4;
+ return unsigned(k - UnitMergeKindReqMod) <=
+ unsigned(UnitMergeKindReqDoc - UnitMergeKindReqMod);
}
typedef const uchar* PC;
@@ -451,6 +456,8 @@ struct Unit {
}
void*& mergeableObj(int ix) { return ((void**)m_mergeables)[ix]; }
void* mergeableData(int ix) { return (char*)m_mergeables + ix*sizeof(void*); }
+ template <bool debugger>
+ void mergeImpl(void* tcbase);
public:
Func* getMain() const {
return *funcBegin();
@@ -512,8 +519,10 @@ struct Unit {
const Func* getFunc(Offset pc) const;
void enableIntercepts();
-
- void setCacheId(unsigned id) { m_cacheId = id; }
+ void setCacheId(unsigned id) {
+ m_cacheOffset = id >> 3;
+ m_cacheMask = 1 << (id & 7);
+ }
bool isMergeOnly() const { return m_mainReturn._count; }
void clearMergeOnly() { m_mainReturn._count = 0; }
public:
@@ -555,11 +564,12 @@ struct Unit {
void* m_mergeables;
unsigned m_firstHoistableFunc;
unsigned m_firstHoistablePreClass;
- unsigned m_firstMergablePreClass;
+ unsigned m_firstMergeablePreClass;
unsigned m_mergeablesSize;
- unsigned m_cacheId;
+ unsigned m_cacheOffset;
int8 m_repoId;
- int8 m_initialMergeState;
+ uint8 m_mergeState;
+ uint8 m_cacheMask;
LineTable m_lineTable;
FuncTable m_funcTable;
PreConstVec m_preConsts;
View
28 src/runtime/vm/vm.cpp
@@ -26,6 +26,7 @@
#include <runtime/vm/translator/translator.h>
#include <runtime/vm/translator/targetcache.h>
#include <runtime/vm/translator/fixup.h>
+#include <runtime/vm/translator/translator-x64.h>
#include <runtime/eval/runtime/file_repository.h>
#include <system/lib/systemlib.h>
#include <util/logger.h>
@@ -118,6 +119,25 @@ void ProcessInit() {
// ensure that nextTx64 and tx64 are set
(void)VM::Transl::Translator::Get();
+ if (!RuntimeOption::RepoAuthoritative &&
+ RuntimeOption::EvalJitEnableRenameFunction &&
+ RuntimeOption::EvalJit) {
+ VM::Func::enableIntercept();
+ VM::Transl::TranslatorX64* tx64 = VM::Transl::TranslatorX64::Get();
+ tx64->enableIntercepts();
+ }
+ // Save the current options, and set things up so that
+ // systemlib.php can be read from and stored in the
+ // normal repo.
+ bool db = RuntimeOption::EvalDumpBytecode;
+ bool p = RuntimeOption::RepoAuthoritative;
+ bool rp = RuntimeOption::AlwaysUseRelativePath;
+ bool sf = RuntimeOption::SafeFileAccess;
+ RuntimeOption::EvalDumpBytecode = false;
+ RuntimeOption::RepoAuthoritative = false;
+ RuntimeOption::AlwaysUseRelativePath = false;
+ RuntimeOption::SafeFileAccess = false;
+
Transl::TargetCache::requestInit();
Unit* nativeFuncUnit = build_native_func_unit(hhbc_ext_funcs,
@@ -192,6 +212,14 @@ void ProcessInit() {
file->incRef();
SystemLib::s_unit = file->unit();
+ // Restore settings before merging anything,
+ // because of optimizations that depend on the
+ // setting of RepoAuthoritative
+ RuntimeOption::EvalDumpBytecode = db;
+ RuntimeOption::RepoAuthoritative = p;
+ RuntimeOption::AlwaysUseRelativePath = rp;
+ RuntimeOption::SafeFileAccess = sf;
+
// Load the systemlib unit to build the Class objects
SystemLib::s_unit->merge();
View
47 src/test/test_code_run.cpp
@@ -24021,6 +24021,53 @@ bool TestCodeRun::TestYield() {
" int(3)\n"
"}\n");
+ // Testing destructor/assignment corner cases
+ MVCRO("<?php\n"
+ "\n"
+ "class Evil {\n"
+ " public func