/**
 * @file llinstancetracker.h
 * @brief LLInstanceTracker is a mixin class that automatically tracks object
 *        instances with or without an associated key
 *
 * $LicenseInfo:firstyear=2000&license=viewergpl$
 *
 * Copyright (c) 2010, Linden Research, Inc.
 *
 * Second Life Viewer Source Code
 * The source code in this file ("Source Code") is provided by Linden Lab
 * to you under the terms of the GNU General Public License, version 2.0
 * ("GPL"), unless you have obtained a separate licensing agreement
 * ("Other License"), formally executed by you and Linden Lab. Terms of
 * the GPL can be found in doc/GPL-license.txt in this distribution, or
 * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2
 *
 * There are special exceptions to the terms and conditions of the GPL as
 * it is applied to this Source Code. View the full text of the exception
 * in the file doc/FLOSS-exception.txt in this software distribution, or
 * online at
 * http://secondlifegrid.net/programs/open_source/licensing/flossexception
 *
 * By copying, modifying or distributing this software, you acknowledge
 * that you have read and understood your obligations described above,
 * and agree to abide by those obligations.
 *
 * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO
 * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY,
 * COMPLETENESS OR PERFORMANCE.
 * $/LicenseInfo$
 */

#ifndef LL_LLINSTANCETRACKER_H
#define LL_LLINSTANCETRACKER_H

#if LL_WINDOWS
# pragma warning (push)
# pragma warning (disable:4265)
#endif
#include <mutex>
#if LL_WINDOWS
# pragma warning (pop)
#endif

#include <memory>
#include <typeinfo>
#include <vector>

#include "boost/iterator/filter_iterator.hpp"
#include "boost/iterator/indirect_iterator.hpp"
#include "boost/iterator/transform_iterator.hpp"

#include "llerror.h"
#include "hbfastmap.h"
#include "hbfastset.h"

namespace LLInstanceTrackerPrivate
{

struct StaticBase
{
    // We need to be able to lock static data while manipulating it.
    std::mutex mMutex;
};

// Instantiate this template to obtain a pointer to the canonical static
// instance of Static while holding a lock on that instance. Use of
// Static::mMutex presumes that Static declares some suitable mMutex.
// NOTE: this template class is defined in a lockstatic.h header in LL's
// sources, but since it is only used by LLInstanceTracker, I moved it
// here. HB
template <typename Static>
class LockStatic
{
    typedef std::unique_lock<decltype(Static::mMutex)> lock_t;

public:
    LockStatic()
    :   mData(getStatic()),
        mLock(mData->mMutex)
    {
    }

    LL_INLINE Static* get() const           { return mData; }
    LL_INLINE operator Static*() const      { return get(); }
    LL_INLINE Static* operator->() const    { return get(); }

    // Sometimes we must explicitly unlock...
    LL_INLINE void unlock()
    {
        // ... But once we do, access is no longer permitted !
        mData = NULL;
        mLock.unlock();
    }

private:
    Static* getStatic()
    {
        // Static::mMutex must be a function-local static rather than a
        // class static: some of our consumers must function properly
        // (therefore lock properly) even when the containing module's static
        // variables have not yet been runtime-initialized. A mutex requires
        // construction, and a static class member might not yet have been
        // constructed.
        // We could store a dumb mutex_t*, notice when it is NULL and
        // allocate a heap mutex, but this is vulnerable to race conditions.
        // And we cannot defend the dumb pointer with another mutex.
        // We could store a std::atomic<mutex_t*>, but a default-constructed
        // std::atomic<T> does not contain a valid T, not even a
        // default-constructed T ! Which means std::atomic, too, requires
        // runtime initialization.
        // A function-local static is guaranteed to be initialized exactly
        // once: the first time control reaches that declaration.
        static Static sData;
        return &sData;
    }

protected:
    Static* mData;
    lock_t mLock;
};

} // End namespace LLInstanceTrackerPrivate
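
// Illustrative sketch (not part of the original header) of how LockStatic is
// meant to be consumed: aggregate the data to protect in a struct deriving
// from StaticBase, then instantiate LockStatic<ThatStruct> wherever the data
// must be read or written. The "MyStaticData" and "bumpCounter" names below
// are hypothetical.
//
//   struct MyStaticData : public LLInstanceTrackerPrivate::StaticBase
//   {
//       int mCounter = 0;
//   };
//
//   void bumpCounter()
//   {
//       // Constructing the lock acquires mMutex; the mutex is released when
//       // the lock object goes out of scope (or on an explicit unlock()).
//       LLInstanceTrackerPrivate::LockStatic<MyStaticData> lock;
//       ++lock->mCounter;
//   }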

enum EInstanceTrackerAllowKeyCollisions
{
    LLInstanceTrackerErrorOnCollision,
    LLInstanceTrackerReplaceOnCollision
};

// This mix-in class adds support for tracking all instances of the specified
// class parameter T. The (optional) key associates a value of type KEY with a
// given instance of T, for quick lookup. If KEY is not provided, instances
// are simply stored in an unordered set.
// NOTE: see the explicit specialization below for the default KEY == void
// case.
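
// Illustrative usage sketch (not part of the original header); the
// "LLNamedThing" class below is hypothetical. Derive from LLInstanceTracker,
// pass a key to its constructor, then look instances up by key with
// getNamedInstance() or enumerate them via the snapshot classes.
//
//   class LLNamedThing : public LLInstanceTracker<LLNamedThing, std::string>
//   {
//   public:
//       LLNamedThing(const std::string& name)
//       :   LLInstanceTracker<LLNamedThing, std::string>(name)
//       {
//       }
//   };
//
//   // Lookup by key; returns a null shared_ptr when no instance matches.
//   LLNamedThing::ptr_t thingp = LLNamedThing::getNamedInstance("first");
//
//   // Enumerate all live instances (yields references to T).
//   for (LLNamedThing& thing : LLNamedThing::instance_snapshot())
//   {
//       llinfos << thing.getKey() << llendl;
//   }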

template<typename T, typename KEY = void,
         EInstanceTrackerAllowKeyCollisions KEY_COLLISION_BEHAVIOR =
            LLInstanceTrackerErrorOnCollision>
class LLInstanceTracker
{
    typedef flat_hmap<KEY, std::shared_ptr<T> > InstanceMap;

    struct StaticData : public LLInstanceTrackerPrivate::StaticBase
    {
        InstanceMap mMap;
    };
    typedef LLInstanceTrackerPrivate::LockStatic<StaticData> LockStatic;

public:
    using ptr_t = std::shared_ptr<T>;
    using weak_t = std::weak_ptr<T>;

    // No-copy
    LLInstanceTracker(const LLInstanceTracker&) = delete;
    const LLInstanceTracker& operator=(const LLInstanceTracker&) = delete;

    // Storing a dumb T* somewhere external is a bad idea, since the
    // LLInstanceTracker subclasses are explicitly destroyed rather than
    // managed by smart pointers. It is legal to declare stack instances of
    // an LLInstanceTracker subclass.
    // But it is reasonable to store a std::weak_ptr<T>, which will become
    // invalid when the T instance is destroyed.
    LL_INLINE weak_t getWeak()                  { return mSelf; }
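
    // Illustrative sketch (not part of the original header): external code
    // that must remember a tracked instance should hold the weak_t returned
    // by getWeak() rather than a raw pointer, and lock it at each use. The
    // "thing" and "mThingRef" names below are hypothetical.
    //
    //   weak_t mThingRef = thing.getWeak();
    //   ...
    //   if (ptr_t thingp = mThingRef.lock())
    //   {
    //       // Non-empty: the instance had not been destroyed when lock()
    //       // ran. Note that, because the tracker uses a no-op deleter,
    //       // holding 'thingp' does not prolong the instance's life.
    //   }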

    LL_INLINE static size_t instanceCount()     { return LockStatic()->mMap.size(); }

    // Snapshot of std::pair<const KEY, std::shared_ptr<T>> pairs
    class snapshot
    {
    private:
        // It is very important that what we store in this snapshot are weak
        // pointers, NOT shared pointers. This is how we discover whether any
        // instance has been deleted during the lifespan of a snapshot.
        typedef std::vector<std::pair<const KEY, weak_t> > VectorType;

        // Dereferencing our iterator produces a std::shared_ptr for each
        // instance that still exists. Since we store weak_ptrs, that
        // involves two chained transformations:
        //  - a transform_iterator to lock the weak_ptr and return a
        //    shared_ptr;
        //  - a filter_iterator to skip any shared_ptr that has become
        //    invalid.
        // It is very important that we filter lazily, that is, during
        // traversal. Any one of our stored weak_ptrs might expire during
        // traversal.
        typedef std::pair<const KEY, ptr_t> strong_pair;

        // Note for future reference: Nat has not yet had any luck (up to
        // Boost 1.67) trying to use boost::transform_iterator with a
        // hand-coded functor, only with actual functions. In his experience,
        // an internal boost::result_of() operation fails, even with an
        // explicit result_type typedef. But this works.
        LL_INLINE static strong_pair strengthen(typename VectorType::value_type& pair)
        {
            return { pair.first, pair.second.lock() };
        }

        LL_INLINE static bool dead_skipper(const strong_pair& pair)
        {
            return bool(pair.second);
        }

    public:
        snapshot()
        // Populate our vector with a snapshot of (locked !) InstanceMap.
        // Note: this assigns pair<KEY, shared_ptr> to pair<KEY, weak_ptr>.
        :   mData(mLock->mMap.begin(), mLock->mMap.end())
        {
            // Release the lock once we have populated mData
            mLock.unlock();
        }

        // You cannot make a transform_iterator (or anything else) that
        // literally stores a C++ function (decltype(strengthen)), but you
        // can make a transform_iterator based on a _function pointer_.
        typedef boost::transform_iterator<decltype(strengthen)*,
                                          typename VectorType::iterator> strong_iterator;
        typedef boost::filter_iterator<decltype(dead_skipper)*,
                                       strong_iterator> iterator;

        LL_INLINE iterator begin()      { return make_iterator(mData.begin()); }
        LL_INLINE iterator end()        { return make_iterator(mData.end()); }

    private:
        iterator make_iterator(typename VectorType::iterator iter)
        {
            // transform_iterator only needs the base iterator and the
            // transform. filter_iterator wants the predicate and both ends
            // of the range.
            return iterator(dead_skipper,
                            strong_iterator(iter, strengthen),
                            strong_iterator(mData.end(), strengthen));
        }

    private:
        // Lock static data during construction
#if !LL_WINDOWS
        LockStatic mLock;
#else
        // We want to be able to use (e.g.) our instance_snapshot subclass as:
        //   for (auto& inst : T::instance_snapshot()) ...
        // But when this snapshot base class directly contains LockStatic, as
        // above, VS2017 requires us to code instead:
        //   for (auto& inst : std::move(T::instance_snapshot())) ...
        // Nat thinks this should be unnecessary, as an anonymous class
        // instance is already a temporary. It should not need to be cast to
        // rvalue reference (the role of std::move()). clang evidently agrees,
        // as the short form works fine with Xcode on Mac.
        // To support the succinct usage, instead of directly storing
        // LockStatic, store std::shared_ptr<LockStatic>, which is copyable.
        std::shared_ptr<LockStatic> mLockp{ std::make_shared<LockStatic>() };
        LockStatic& mLock{ *mLockp };
#endif
        VectorType mData;
    };

    // Iterate over this for references to each instance
    class instance_snapshot : public snapshot
    {
    private:
        LL_INLINE static T& instance_getter(typename snapshot::iterator::reference pair)
        {
            return *pair.second;
        }

    public:
        typedef boost::transform_iterator<decltype(instance_getter)*,
                                          typename snapshot::iterator> iterator;

        LL_INLINE iterator begin()
        {
            return iterator(snapshot::begin(), instance_getter);
        }

        LL_INLINE iterator end()
        {
            return iterator(snapshot::end(), instance_getter);
        }

        LL_INLINE void deleteAll()
        {
            for (auto it = snapshot::begin(), end = snapshot::end();
                 it != end; ++it)
            {
                delete it->second.get();
            }
        }
    };
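
    // Illustrative sketch (not part of the original header), reusing the
    // hypothetical LLNamedThing class from the example above: deleteAll()
    // destroys every live instance. This stays safe even though each
    // deletion erases an entry from the class-static map, because traversal
    // happens over the snapshot's private vector of weak_ptrs, not over the
    // map itself.
    //
    //   LLNamedThing::instance_snapshot().deleteAll();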

    // Iterate over this for each key
    class key_snapshot : public snapshot
    {
    private:
        LL_INLINE static KEY key_getter(typename snapshot::iterator::reference pair)
        {
            return pair.first;
        }

    public:
        typedef boost::transform_iterator<decltype(key_getter)*,
                                          typename snapshot::iterator> iterator;

        LL_INLINE iterator begin()
        {
            return iterator(snapshot::begin(), key_getter);
        }

        LL_INLINE iterator end()
        {
            return iterator(snapshot::end(), key_getter);
        }
    };
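
    // Illustrative sketch (not part of the original header), again with the
    // hypothetical LLNamedThing class: key_snapshot yields the keys of the
    // instances still alive at traversal time.
    //
    //   for (const std::string& name : LLNamedThing::key_snapshot())
    //   {
    //       llinfos << "Tracked instance: " << name << llendl;
    //   }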

    // Note: renamed from 'getInstance()' since otherwise it conflicts with
    // LLSingleton::getInstance() when the class is both a singleton and a
    // tracked instance... HB
    static ptr_t getNamedInstance(const KEY& k)
    {
        LockStatic lock;
        const InstanceMap& map = lock->mMap;
        typename InstanceMap::const_iterator found = map.find(k);
        return found == map.end() ? NULL : found->second;
    }

    // While iterating over instances, we might want to request the key
    LL_INLINE virtual const KEY& getKey() const { return mInstanceKey; }

protected:
    LLInstanceTracker(KEY key)
    {
        // We do not intend to manage the lifespan of this object with
        // shared_ptr, so give it a no-op deleter. We store shared_ptrs in
        // our InstanceMap specifically so snapshot can store weak_ptrs so we
        // can detect deletions during traversals.
        ptr_t ptr((T*)this, [](T*){});
        // Save corresponding weak_ptr for future reference
        mSelf = ptr;
        LockStatic lock;
        add_(lock, key, ptr);
    }

    virtual ~LLInstanceTracker()
    {
        LockStatic lock;
        remove_(lock);
    }

private:
    void add_(LockStatic& lock, const KEY& key, const ptr_t& ptr)
    {
        mInstanceKey = key;
        InstanceMap& map = lock->mMap;
        if (KEY_COLLISION_BEHAVIOR == LLInstanceTrackerErrorOnCollision)
        {
            auto pair = map.emplace(key, ptr);
            if (!pair.second)
            {
                llerrs << "Key " << key
                       << " already exists in instance map for "
                       << LLError::className(typeid(*this)) << llendl;
            }
        }
        else
        {
            map[key] = ptr;
        }
    }

    ptr_t remove_(LockStatic& lock)
    {
        InstanceMap& map = lock->mMap;
        typename InstanceMap::iterator iter = map.find(mInstanceKey);
        if (iter != map.end())
        {
            auto ret = iter->second;
            map.erase(iter);
            return ret;
        }
        return {};
    }

private:
    // Storing a weak_ptr to self is a bit like deriving from
    // std::enable_shared_from_this(), except more explicit.
    weak_t mSelf;
    KEY mInstanceKey;
};

// Explicit specialization for the default case where KEY is void, using an
// unordered set of std::shared_ptr<T> instead of a keyed map.
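
// Illustrative usage sketch (not part of the original header); the "LLWidget"
// class name below is hypothetical. With KEY omitted, the tracker only offers
// instance enumeration and counting, not lookup by key:
//
//   class LLWidget : public LLInstanceTracker<LLWidget>
//   {
//   };
//
//   LLWidget a, b;
//   llinfos << LLWidget::instanceCount() << " widgets" << llendl;   // 2
//   for (LLWidget& widget : LLWidget::instance_snapshot())
//   {
//       // ... use 'widget' ...
//   }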

template<typename T, EInstanceTrackerAllowKeyCollisions KEY_COLLISION_BEHAVIOR>
class LLInstanceTracker<T, void, KEY_COLLISION_BEHAVIOR>
{
    typedef flat_hset<std::shared_ptr<T> > InstanceSet;

    struct StaticData : public LLInstanceTrackerPrivate::StaticBase
    {
        InstanceSet mSet;
    };
    typedef LLInstanceTrackerPrivate::LockStatic<StaticData> LockStatic;

public:
    using ptr_t = std::shared_ptr<T>;
    using weak_t = std::weak_ptr<T>;

    // Storing a dumb T* somewhere external is a bad idea, since the
    // LLInstanceTracker subclasses are explicitly destroyed rather than
    // managed by smart pointers. It is legal to declare stack instances of
    // an LLInstanceTracker subclass.
    // But it is reasonable to store a std::weak_ptr<T>, which will become
    // invalid when the T instance is destroyed.
    LL_INLINE weak_t getWeak()                  { return mSelf; }

    LL_INLINE static size_t instanceCount()     { return LockStatic()->mSet.size(); }

    // Snapshot of std::shared_ptr<T> pointers
    class snapshot
    {
    private:
        // It is very important that what we store in this snapshot are weak
        // pointers, NOT shared pointers. This is how we discover whether any
        // instance has been deleted during the lifespan of a snapshot.
        typedef std::vector<weak_t> VectorType;

        // Dereferencing our iterator produces a std::shared_ptr for each
        // instance that still exists. Since we store weak_ptrs, that
        // involves two chained transformations:
        //  - a transform_iterator to lock the weak_ptr and return a
        //    shared_ptr;
        //  - a filter_iterator to skip any shared_ptr that has become
        //    invalid.
        typedef std::shared_ptr<T> strong_ptr;

        LL_INLINE static strong_ptr strengthen(typename VectorType::value_type& ptr)
        {
            return ptr.lock();
        }

        LL_INLINE static bool dead_skipper(const strong_ptr& ptr)
        {
            return bool(ptr);
        }

    public:
        snapshot()
        // Populate our vector with a snapshot of the (locked !) InstanceSet.
        // Note: this assigns each stored shared_ptr to a weak_ptr slot.
        :   mData(mLock->mSet.begin(), mLock->mSet.end())
        {
            // Release the lock once we have populated mData
            mLock.unlock();
        }

        typedef boost::transform_iterator<decltype(strengthen)*,
                                          typename VectorType::iterator> strong_iterator;
        typedef boost::filter_iterator<decltype(dead_skipper)*,
                                       strong_iterator> iterator;

        LL_INLINE iterator begin()      { return make_iterator(mData.begin()); }
        LL_INLINE iterator end()        { return make_iterator(mData.end()); }

    private:
        iterator make_iterator(typename VectorType::iterator iter)
        {
            // transform_iterator only needs the base iterator and the
            // transform. filter_iterator wants the predicate and both ends
            // of the range.
            return iterator(dead_skipper,
                            strong_iterator(iter, strengthen),
                            strong_iterator(mData.end(), strengthen));
        }

    private:
        // Lock static data during construction
#if !LL_WINDOWS
        LockStatic mLock;
#else
        // We want to be able to use (e.g.) our instance_snapshot subclass as:
        //   for (auto& inst : T::instance_snapshot()) ...
        // But when this snapshot base class directly contains LockStatic, as
        // above, VS2017 requires us to code instead:
        //   for (auto& inst : std::move(T::instance_snapshot())) ...
        // Nat thinks this should be unnecessary, as an anonymous class
        // instance is already a temporary. It shouldn't need to be cast to
        // rvalue reference (the role of std::move()). clang evidently agrees,
        // as the short form works fine with Xcode on Mac.
        // To support the succinct usage, instead of directly storing
        // LockStatic, store std::shared_ptr<LockStatic>, which is copyable.
        std::shared_ptr<LockStatic> mLockp{ std::make_shared<LockStatic>() };
        LockStatic& mLock{ *mLockp };
#endif
        VectorType mData;
    };

    // Iterate over this for references to each instance
    class instance_snapshot : public snapshot
    {
    public:
        typedef boost::indirect_iterator<typename snapshot::iterator> iterator;

        LL_INLINE iterator begin()
        {
            return iterator(snapshot::begin());
        }

        LL_INLINE iterator end()
        {
            return iterator(snapshot::end());
        }

        LL_INLINE void deleteAll()
        {
            for (auto it = snapshot::begin(), end = snapshot::end();
                 it != end; ++it)
            {
                delete it->get();
            }
        }
    };

protected:
    LLInstanceTracker()
    {
        // Since we do not intend for this shared_ptr to manage lifespan,
        // give it a no-op deleter.
        std::shared_ptr<T> ptr((T*)this, [](T*){});
        // Save corresponding weak_ptr for future reference
        mSelf = ptr;
        // Also store it in our class-static set to track this instance.
        LockStatic()->mSet.emplace(ptr);
    }

    virtual ~LLInstanceTracker()
    {
        // Convert weak_ptr to shared_ptr because this is what we store in
        // our InstanceSet.
        LockStatic()->mSet.erase(mSelf.lock());
    }

    LLInstanceTracker(const LLInstanceTracker& other)
    :   LLInstanceTracker()
    {
    }

private:
    // Storing a weak_ptr to self is a bit like deriving from
    // std::enable_shared_from_this(), except more explicit.
    weak_t mSelf;
};

#endif  // LL_LLINSTANCETRACKER_H