llvertexbuffer.cpp 49 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172
  1. /**
  2. * @file llvertexbuffer.cpp
  3. * @brief LLVertexBuffer implementation
  4. *
  5. * $LicenseInfo:firstyear=2003&license=viewergpl$
  6. *
  7. * Copyright (c) 2003-2009, Linden Research, Inc.
  8. *
  9. * Second Life Viewer Source Code
  10. * The source code in this file ("Source Code") is provided by Linden Lab
  11. * to you under the terms of the GNU General Public License, version 2.0
  12. * ("GPL"), unless you have obtained a separate licensing agreement
  13. * ("Other License"), formally executed by you and Linden Lab. Terms of
  14. * the GPL can be found in doc/GPL-license.txt in this distribution, or
  15. * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2
  16. *
  17. * There are special exceptions to the terms and conditions of the GPL as
  18. * it is applied to this Source Code. View the full text of the exception
  19. * in the file doc/FLOSS-exception.txt in this software distribution, or
  20. * online at
  21. * http://secondlifegrid.net/programs/open_source/licensing/flossexception
  22. *
  23. * By copying, modifying or distributing this software, you acknowledge
  24. * that you have read and understood your obligations described above,
  25. * and agree to abide by those obligations.
  26. *
  27. * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO
  28. * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY,
  29. * COMPLETENESS OR PERFORMANCE.
  30. * $/LicenseInfo$
  31. */
  32. #include "linden_common.h"
  33. #include <algorithm> // For std::sort
  34. #include <utility> // For std::move
  35. #include "llvertexbuffer.h"
  36. #include "llapp.h" // For LLApp::isExiting()
  37. #include "llglslshader.h"
  38. #include "llmemory.h" // For ll_aligned_*()
  39. #include "llrender.h" // For LLRender::sCurrentFrame
  40. #include "llshadermgr.h"
  41. #include "llsys.h"
  42. #include "hbtracy.h"
  43. // Define to 1 to enable (but this is slow).
  44. #define TRACE_WITH_TRACY 0
  45. #if !TRACE_WITH_TRACY
  46. # undef LL_TRACY_TIMER
  47. # define LL_TRACY_TIMER(name)
  48. #endif
  49. // Helper functions
  50. // Next Highest Power Of Two: returns first number > v that is a power of 2, or
  51. // v if v is already a power of 2
  52. U32 nhpo2(U32 v)
  53. {
  54. U32 r = 2;
  55. while (r < v) r *= 2;
  56. return r;
  57. }
  58. // Which power of 2 is i ? Assumes i is a power of 2 > 0.
  59. U32 wpo2(U32 i)
  60. {
  61. llassert(i > 0 && nhpo2(i) == i);
  62. U32 r = 0;
  63. while (i >>= 1) ++r;
  64. return r;
  65. }
  66. static void flush_vbo(GLenum target, U32 start, U32 end, U8* data)
  67. {
  68. if (end)
  69. {
  70. constexpr U32 block_size = 8192;
  71. for (U32 i = start; i <= end; i += block_size)
  72. {
  73. U32 tend = llmin(i + block_size, end);
  74. glBufferSubData(target, i, tend - i + 1, data + (i - start));
  75. }
  76. }
  77. }
///////////////////////////////////////////////////////////////////////////////
// LLVBOPool class
// GL name pools for vertex buffers
///////////////////////////////////////////////////////////////////////////////

// Number of GL buffer names generated in one go by genBuffer().
constexpr U32 POOL_SIZE = 4096;

// Cache of GL buffer objects (and their client-memory shadow copies),
// bucketed by rounded-up size, to avoid constantly creating and destroying
// VBOs/IBOs.
class LLVBOPool
{
protected:
    LOG_CLASS(LLVBOPool);

public:
    LL_INLINE LLVBOPool()
    :   mAllocated(0),
        mReserved(0),
        mRequested(0),
        mBufferCount(0),
        mTotalHits(0),
        mAllocCount(0),
        mMissCount(0),
        mSkipped(0)
    {
    }

    // Logs final statistics, frees all cached buffers and returns any unused
    // GL names left in the shared name pool to GL.
    LL_INLINE ~LLVBOPool()
    {
        logStats();
        clear();
        if (sNameIdx)
        {
            glDeleteBuffers(sNameIdx, sNamePool);
            sNameIdx = 0;
        }
    }

    // Rounds 'size' up to the pool bucket granularity (1/8th of the next
    // power of two, 16 bytes minimum), so that close sizes share buckets.
    LL_INLINE U32 adjustSize(U32 size)
    {
        U32 block_size = llmax(nhpo2(size) / 8, 16);
        return size + block_size - (size % block_size);
    }

    // Returns a client-memory pointer for a buffer of at least 'size' bytes
    // and sets 'name' to the matching GL buffer name.
    U8* allocate(S32 type, U32 size, U32& name);

    // Size MUST be the size provided to allocate that returned the given name
    void free(S32 type, U32 size, U32 name, U8* data);

    // Expires old cached buffers; throttled unless 'force' is true.
    void clean(bool force = false);

    // Frees all cached entries and their GL buffers.
    void clear();

    // Current VRAM footprint (in-use + cached), in megabytes.
    U64 getVRAMMegabytes() const
    {
        return BYTES2MEGABYTES(mAllocated + mReserved);
    }

    void logStats();

private:
    U32 genBuffer();
    void deleteBuffer(U32 name);

private:
    // One cached buffer: its client-memory copy, its GL name, and the frame
    // at which it was released (used for expiry in clean()).
    struct Entry
    {
        LL_INLINE Entry(U8* data, U32 name, U32 current_frame)
        :   mData(data),
            mGLName(name),
            mFrameStamp(current_frame)
        {
        }

        U8* mData;
        U32 mGLName;
        U32 mFrameStamp;
    };

    typedef std::list<Entry> entry_list_t;
    // Cached buffers, keyed by their adjustSize()d size.
    typedef fast_hmap<U32, entry_list_t > pool_map_t;
    pool_map_t mVBOPool;
    pool_map_t mIBOPool;

    S64 mAllocated;     // Bytes handed out to users (after size adjustment)
    S64 mReserved;      // Bytes sitting unused in the cache
    S64 mRequested;     // Bytes requested by users (before size adjustment)
    U32 mBufferCount;   // Number of live buffers (in use + cached)
    U32 mTotalHits;     // Cache hits, for logStats()
    U32 mAllocCount;    // Total allocate() calls, for logStats()
    U32 mMissCount;     // Cache misses since the last clean() sweep
    U32 mSkipped;       // Throttled clean() calls since the last sweep

    // Used to avoid calling glGenBuffers() for every VBO creation
    static U32 sNamePool[POOL_SIZE];
    static U32 sNameIdx;
};
// File-scope singleton pool, created in LLVertexBuffer::initClass() and
// destroyed (when exiting only) in LLVertexBuffer::cleanupClass().
static LLVBOPool* sVBOPool = NULL;

// Static members
U32 LLVBOPool::sNamePool[POOL_SIZE];
U32 LLVBOPool::sNameIdx = 0;
  160. U32 LLVBOPool::genBuffer()
  161. {
  162. if (!sNameIdx)
  163. {
  164. if (gGLManager.mIsAMD)
  165. {
  166. // Workaround for AMD bug.
  167. for (U32 i = 0; i < POOL_SIZE; ++i)
  168. {
  169. glGenBuffers(1, sNamePool + i);
  170. }
  171. }
  172. else
  173. {
  174. glGenBuffers(POOL_SIZE, sNamePool);
  175. }
  176. sNameIdx = POOL_SIZE;
  177. }
  178. return sNamePool[--sNameIdx];
  179. }
  180. U8* LLVBOPool::allocate(S32 type, U32 size, U32& name)
  181. {
  182. U8* ret = NULL;
  183. ++mAllocCount;
  184. mRequested += size;
  185. size = adjustSize(size);
  186. mAllocated += size;
  187. auto& pool = type == GL_ELEMENT_ARRAY_BUFFER ? mIBOPool : mVBOPool;
  188. pool_map_t::iterator iter = pool.find(size);
  189. if (iter != pool.end())
  190. {
  191. ++mTotalHits;
  192. mReserved -= size;
  193. if (mReserved < 0)
  194. {
  195. llwarns << "Reserved buffers accounting mismatch: "
  196. << mReserved << ". Zeroed." << llendl;
  197. mReserved = 0;
  198. }
  199. // Found a free buffer
  200. entry_list_t& entries = iter->second;
  201. Entry& entry = entries.back();
  202. name = entry.mGLName;
  203. ret = entry.mData;
  204. // Remove this entry from the list
  205. entries.pop_back();
  206. if (entries.empty())
  207. {
  208. // Remove this list of empty entries
  209. pool.erase(iter);
  210. }
  211. return ret;
  212. }
  213. // Cache miss, allocate a new buffer
  214. ++mMissCount;
  215. name = genBuffer();
  216. glBindBuffer(type, name);
  217. // Note: we now use the GL_DYNAMIC_DRAW hint everywhere: I did test (with
  218. // a key = usage << 32 + size for the cache) with usage hints preservation,
  219. // but it simply does not change anything at all to frame rates (my guess
  220. // is that modern GL drivers find the right usage and ignore the hint,
  221. // which most programmers get wrong anyway). HB
  222. glBufferData(type, size, NULL, GL_DYNAMIC_DRAW);
  223. ret = (U8*)ll_aligned_malloc(size, 64);
  224. if (ret)
  225. {
  226. ++mBufferCount;
  227. if (type == GL_ELEMENT_ARRAY_BUFFER)
  228. {
  229. LLVertexBuffer::sGLRenderIndices = name;
  230. }
  231. else
  232. {
  233. LLVertexBuffer::sGLRenderBuffer = name;
  234. }
  235. }
  236. else
  237. {
  238. LLMemory::allocationFailed();
  239. llwarns << "Memory allocation for Vertex Buffer. Do expect a crash soon..."
  240. << llendl;
  241. }
  242. return ret;
  243. }
  244. void LLVBOPool::free(S32 type, U32 size, U32 name, U8* data)
  245. {
  246. if (name == LLVertexBuffer::sGLRenderBuffer)
  247. {
  248. LLVertexBuffer::unbind();
  249. }
  250. mRequested -= size;
  251. if (mRequested < 0)
  252. {
  253. llwarns << "Requested buffers accounting mismatch: " << mRequested
  254. << ". Zeroed." << llendl;
  255. mRequested = 0;
  256. }
  257. size = adjustSize(size);
  258. mAllocated -= size;
  259. if (mAllocated < 0)
  260. {
  261. llwarns << "Allocated buffers accounting mismatch: " << mAllocated
  262. << ". Zeroed." << llendl;
  263. mAllocated = 0;
  264. }
  265. mReserved += size;
  266. auto& pool = type == GL_ELEMENT_ARRAY_BUFFER ? mIBOPool : mVBOPool;
  267. pool_map_t::iterator iter = pool.find(size);
  268. if (iter != pool.end())
  269. {
  270. // Re-add this freed pool to the existing list
  271. iter->second.emplace_front(data, name, LLRender::sCurrentFrame);
  272. }
  273. else
  274. {
  275. // Make a new list and add this entry to it.
  276. entry_list_t newlist;
  277. newlist.emplace_front(data, name, LLRender::sCurrentFrame);
  278. pool.emplace(size, std::move(newlist));
  279. }
  280. }
// Sweeps both caches and deletes (in a single glDeleteBuffers() batch) the
// buffers that have not been reused for MAX_FRAME_AGE frames. Unless 'force'
// is true, the sweep is throttled while cache misses stay low and the cache
// is not too large (mSkipped prevents the cache from staying large forever).
void LLVBOPool::clean(bool force)
{
    if (!force && mMissCount < 1024 &&
        // Do not let the VBO cache grow and stay too large either... HB
        (mBufferCount < 5 * POOL_SIZE || mSkipped < 600))
    {
        ++mSkipped;
        return;
    }
    mMissCount = mSkipped = 0;
    constexpr U32 MAX_FRAME_AGE = 120;
    U32 current_frame = LLRender::sCurrentFrame;
    std::vector<U32> pending_deletions;
    // Expire old IBOs. free() pushes at the front of each bucket list, so
    // the oldest entries are at the back.
    for (pool_map_t::iterator it = mIBOPool.begin(), end = mIBOPool.end();
         it != end; )
    {
        auto& entries = it->second;
        while (!entries.empty())
        {
            auto& entry = entries.back();
            if (current_frame - entry.mFrameStamp < MAX_FRAME_AGE)
            {
                // This entry (and therefore all newer ones) is recent enough.
                break;
            }
            ll_aligned_free(entry.mData);
            mReserved -= it->first;
            --mBufferCount;
            pending_deletions.push_back(entry.mGLName);
            entries.pop_back();
        }
        if (entries.empty())
        {
            it = mIBOPool.erase(it);
        }
        else
        {
            ++it;
        }
    }
    // Expire old VBOs, in exactly the same way.
    for (pool_map_t::iterator it = mVBOPool.begin(), end = mVBOPool.end();
         it != end; )
    {
        auto& entries = it->second;
        while (!entries.empty())
        {
            auto& entry = entries.back();
            if (current_frame - entry.mFrameStamp < MAX_FRAME_AGE)
            {
                break;
            }
            ll_aligned_free(entry.mData);
            mReserved -= it->first;
            --mBufferCount;
            pending_deletions.push_back(entry.mGLName);
            entries.pop_back();
        }
        if (entries.empty())
        {
            it = mVBOPool.erase(it);
        }
        else
        {
            ++it;
        }
    }
    if (mReserved < 0)
    {
        llwarns << "Reserved buffers accounting mismatch: " << mReserved
                << ". Zeroed." << llendl;
        mReserved = 0;
    }
    size_t pending = pending_deletions.size();
    if (pending)
    {
        // Delete all expired GL buffers in one call.
        glDeleteBuffers(pending, pending_deletions.data());
        // Only log stats when the debug tag is enabled. HB
        // NOTE(review): the 'log_stats = true;' statement below is apparently
        // only executed when the "VertexBuffer" debug tag is enabled, which
        // implies LL_DEBUGS opens a conditional scope closed by LL_ENDL —
        // confirm against the logging macros before restructuring this.
        bool log_stats = false;
        LL_DEBUGS("VertexBuffer") << "Erased " << pending;
        log_stats = true;
        LL_CONT << " expired buffers." << LL_ENDL;
        if (log_stats)
        {
            logStats();
        }
    }
}
  367. void LLVBOPool::clear()
  368. {
  369. std::vector<U32> pending_deletions;
  370. pending_deletions.reserve(mIBOPool.size() + mVBOPool.size());
  371. for (auto& entries : mIBOPool)
  372. {
  373. for (auto& entry : entries.second)
  374. {
  375. ll_aligned_free(entry.mData);
  376. --mBufferCount;
  377. pending_deletions.push_back(entry.mGLName);
  378. }
  379. }
  380. for (auto& entries : mVBOPool)
  381. {
  382. for (auto& entry : entries.second)
  383. {
  384. ll_aligned_free(entry.mData);
  385. --mBufferCount;
  386. pending_deletions.push_back(entry.mGLName);
  387. }
  388. }
  389. size_t pending = pending_deletions.size();
  390. if (pending)
  391. {
  392. glDeleteBuffers(pending, pending_deletions.data());
  393. }
  394. mIBOPool.clear();
  395. mVBOPool.clear();
  396. mReserved = 0;
  397. }
  398. void LLVBOPool::logStats()
  399. {
  400. if (!mRequested || !mAllocCount)
  401. {
  402. return;
  403. }
  404. llinfos << "VBO pool stats: " << mBufferCount << " total buffers, "
  405. << BYTES2MEGABYTES(mRequested) << "MB in use, "
  406. << BYTES2MEGABYTES(mAllocated) << "MB allocated (overhead: "
  407. << 0.1f * ((mAllocated - mRequested) * 1000 / mRequested) << "%), "
  408. << BYTES2MEGABYTES(mReserved) << "MB available in cache, "
  409. << BYTES2MEGABYTES(mAllocated + mReserved)
  410. << "MB total in VRAM. Cache hit rate: "
  411. << 0.1f * (mTotalHits * 1000 / mAllocCount) << "%" << LL_ENDL;
  412. }
///////////////////////////////////////////////////////////////////////////////
// LLVertexBuffer class
///////////////////////////////////////////////////////////////////////////////

#if LL_DEBUG_VB_ALLOC
// Set of all live LLVertexBuffer instances, for allocations debugging.
LLVertexBuffer::instances_set_t LLVertexBuffer::sInstances;
#endif

// Shared scratch buffer used by the static drawArrays()/drawElements()
// helpers; not allocated in PBR rendering mode (see initClass()).
LLPointer<LLVertexBuffer> LLVertexBuffer::sUtilityBuffer = NULL;

U32 LLVertexBuffer::sBindCount = 0;
U32 LLVertexBuffer::sSetCount = 0;
S32 LLVertexBuffer::sGLCount = 0;
// GL names of the currently bound vertex and index buffers.
U32 LLVertexBuffer::sGLRenderBuffer = 0;
U32 LLVertexBuffer::sGLRenderIndices = 0;
// Last attribute mask applied by setupClientArrays().
U32 LLVertexBuffer::sLastMask = 0;
U32 LLVertexBuffer::sVertexCount = 0;
U32 LLVertexBuffer::sIndexCount = 0;
bool LLVertexBuffer::sVBOActive = false;
bool LLVertexBuffer::sIBOActive = false;
// Per-vertex byte size of each attribute; entries MUST stay in the same order
// as the TYPE_* enum in llvertexbuffer.h.
const U32 LLVertexBuffer::sTypeSize[LLVertexBuffer::TYPE_MAX] =
{
    sizeof(LLVector4),  // TYPE_VERTEX,
    sizeof(LLVector4),  // TYPE_NORMAL,
    sizeof(LLVector2),  // TYPE_TEXCOORD0,
    sizeof(LLVector2),  // TYPE_TEXCOORD1,
    sizeof(LLVector2),  // TYPE_TEXCOORD2,
    sizeof(LLVector2),  // TYPE_TEXCOORD3,
    sizeof(LLColor4U),  // TYPE_COLOR,
    sizeof(LLColor4U),  // TYPE_EMISSIVE, only alpha is used currently
    sizeof(LLVector4),  // TYPE_TANGENT,
    sizeof(F32),        // TYPE_WEIGHT,
    sizeof(LLVector4),  // TYPE_WEIGHT4,
    sizeof(LLVector4),  // TYPE_CLOTHWEIGHT,
    sizeof(U64),        // TYPE_JOINT,
    // Actually exists as position.w, no extra data, but stride is 16 bytes
    sizeof(LLVector4),  // TYPE_TEXTURE_INDEX
};
// Human-readable names for the vertex attribute types, for logging; entries
// MUST stay in the same order as the TYPE_* enum in llvertexbuffer.h.
static const std::string vb_type_name[] =
{
    "TYPE_VERTEX",
    "TYPE_NORMAL",
    "TYPE_TEXCOORD0",
    "TYPE_TEXCOORD1",
    "TYPE_TEXCOORD2",
    "TYPE_TEXCOORD3",
    "TYPE_COLOR",
    "TYPE_EMISSIVE",
    "TYPE_TANGENT",
    "TYPE_WEIGHT",
    "TYPE_WEIGHT4",
    "TYPE_CLOTHWEIGHT",
    "TYPE_JOINT",
    "TYPE_TEXTURE_INDEX",
    "TYPE_MAX",
    "TYPE_INDEX",
};
// Translation table from LLRender draw modes to GL primitive modes; entries
// MUST stay in the same order as the corresponding LLRender enum.
const U32 LLVertexBuffer::sGLMode[LLRender::NUM_MODES] =
{
    GL_TRIANGLES,
    GL_TRIANGLE_STRIP,
    GL_TRIANGLE_FAN,
    GL_POINTS,
    GL_LINES,
    GL_LINE_STRIP,
    GL_LINE_LOOP,
};
  477. //static
  478. void LLVertexBuffer::initClass()
  479. {
  480. if (!sVBOPool)
  481. {
  482. sVBOPool = new LLVBOPool();
  483. }
  484. if (gUsePBRShaders)
  485. {
  486. // Do not allocate the utility buffer for PBR rendering. This would
  487. // break draw calls using it. *TODO: repair it for PBR. HB
  488. sUtilityBuffer = NULL;
  489. return;
  490. }
  491. sUtilityBuffer = new LLVertexBuffer(MAP_VERTEX | MAP_NORMAL |
  492. MAP_TEXCOORD0);
  493. #if LL_DEBUG_VB_ALLOC
  494. sUtilityBuffer->setOwner("Utility buffer");
  495. #endif
  496. if (!sUtilityBuffer->allocateBuffer(65536, 65536))
  497. {
  498. sUtilityBuffer = NULL;
  499. llwarns << "Failed to allocate the utility buffer" << llendl;
  500. }
  501. }
  502. //static
  503. S32 LLVertexBuffer::getVRAMMegabytes()
  504. {
  505. return sVBOPool ? sVBOPool->getVRAMMegabytes() : 0;
  506. }
  507. //static
  508. void LLVertexBuffer::cleanupVBOPool()
  509. {
  510. if (sVBOPool)
  511. {
  512. sVBOPool->clean();
  513. }
  514. }
  515. //static
  516. void LLVertexBuffer::cleanupClass()
  517. {
  518. unbind();
  519. sLastMask = 0;
  520. sUtilityBuffer = NULL;
  521. if (sVBOPool)
  522. {
  523. // Note: do *not* destroy the existing VBO pool unless we are exiting;
  524. // this would cause VB memory accounting mismatches. HB
  525. if (LLApp::isExiting())
  526. {
  527. delete sVBOPool;
  528. sVBOPool = NULL;
  529. }
  530. else
  531. {
  532. sVBOPool->clear();
  533. }
  534. }
  535. }
  536. LLVertexBuffer::LLVertexBuffer(U32 typemask)
  537. : mNumVerts(0),
  538. mNumIndices(0),
  539. mIndicesType(GL_UNSIGNED_SHORT),
  540. mIndicesStride(sizeof(U16)),
  541. mSize(0),
  542. mIndicesSize(0),
  543. mTypeMask(typemask),
  544. mTypeMaskMask(0),
  545. mGLBuffer(0),
  546. mGLIndices(0),
  547. mMappedData(NULL),
  548. mMappedIndexData(NULL),
  549. mCachedBuffer(false)
  550. {
  551. // Zero out offsets
  552. for (U32 i = 0; i < TYPE_MAX; ++i)
  553. {
  554. mOffsets[i] = 0;
  555. }
  556. #if LL_DEBUG_VB_ALLOC
  557. sInstances.insert(this);
  558. #endif
  559. }
  560. // Protected, use unref()
  561. //virtual
  562. LLVertexBuffer::~LLVertexBuffer()
  563. {
  564. unmapBuffer();
  565. destroyGLBuffer();
  566. destroyGLIndices();
  567. sVertexCount -= mNumVerts;
  568. sIndexCount -= mNumIndices;
  569. #if LL_DEBUG_VB_ALLOC
  570. sInstances.erase(this);
  571. #endif
  572. if (gDebugGL)
  573. {
  574. if (mMappedData)
  575. {
  576. llerrs << "Failed to clear vertex buffer vertices" << llendl;
  577. }
  578. if (mMappedIndexData)
  579. {
  580. llerrs << "Failed to clear vertex buffer indices" << llendl;
  581. }
  582. }
  583. }
  584. #if LL_DEBUG_VB_ALLOC
  585. //static
  586. void LLVertexBuffer::dumpInstances()
  587. {
  588. if (sInstances.empty())
  589. {
  590. return;
  591. }
  592. llinfos << "Allocated buffers:";
  593. for (instances_set_t::const_iterator it = sInstances.begin(),
  594. end = sInstances.end();
  595. it != end; ++it)
  596. {
  597. const LLVertexBuffer* vb = *it;
  598. llcont << "\n - 0x" << std::hex << intptr_t(vb) << std::dec << ": "
  599. << vb->mOwner;
  600. }
  601. llcont << llendl;
  602. }
  603. #endif
  604. //static
  605. void LLVertexBuffer::setupClientArrays(U32 data_mask)
  606. {
  607. if (sLastMask != data_mask)
  608. {
  609. if (!gGLManager.mHasVertexAttribIPointer)
  610. {
  611. // Make sure texture index is disabled
  612. data_mask = data_mask & ~MAP_TEXTURE_INDEX;
  613. }
  614. for (U32 i = 0; i < TYPE_MAX; ++i)
  615. {
  616. U32 mask = 1 << i;
  617. if (sLastMask & mask)
  618. {
  619. // Was enabled
  620. if (!(data_mask & mask))
  621. {
  622. // Needs to be disabled
  623. glDisableVertexAttribArray((GLint)i);
  624. }
  625. }
  626. else if (data_mask & mask)
  627. {
  628. // Was disabled and needs to be enabled
  629. glEnableVertexAttribArray((GLint)i);
  630. }
  631. }
  632. sLastMask = data_mask;
  633. }
  634. }
  635. // LL's new (fixed) but slow code, and without normals support.
  636. //static
  637. void LLVertexBuffer::drawArrays(U32 mode, const std::vector<LLVector3>& pos)
  638. {
  639. gGL.begin(mode);
  640. for (U32 i = 0, count = pos.size(); i < count; ++i)
  641. {
  642. gGL.vertex3fv(pos[i].mV);
  643. }
  644. gGL.end(true);
  645. }
  646. //static
  647. void LLVertexBuffer::drawArrays(U32 mode, const std::vector<LLVector3>& pos,
  648. const std::vector<LLVector3>& norm)
  649. {
  650. U32 count = pos.size();
  651. if (count == 0)
  652. {
  653. return;
  654. }
  655. if (count <= 65536 && sUtilityBuffer.notNull())
  656. {
  657. gGL.syncMatrices();
  658. if (norm.size() < count)
  659. {
  660. llwarns_once << "Less normals (" << norm.size()
  661. << ") than vertices (" << count
  662. << "), aborting." << llendl;
  663. return;
  664. }
  665. // Vertex-buffer based, optimized code
  666. LLStrider<LLVector3> vertex_strider;
  667. LLStrider<LLVector3> normal_strider;
  668. if (!sUtilityBuffer->getVertexStrider(vertex_strider) ||
  669. !sUtilityBuffer->getNormalStrider(normal_strider))
  670. {
  671. llwarns_sparse << "Failed to get striders, aborting." << llendl;
  672. return;
  673. }
  674. for (U32 i = 0; i < count; ++i)
  675. {
  676. *(vertex_strider++) = pos[i];
  677. *(normal_strider++) = norm[i];
  678. }
  679. sUtilityBuffer->setBuffer(MAP_VERTEX | MAP_NORMAL);
  680. sUtilityBuffer->drawArrays(mode, 0, pos.size());
  681. }
  682. else
  683. {
  684. // Fallback to LL's new (fixed) but slow code, and without normals
  685. // support
  686. drawArrays(mode, pos);
  687. }
  688. }
// Draws the given indexed triangles (with optional texture coordinates 'tcp')
// via the shared utility buffer when the batch fits in it, else through the
// slow immediate-style path. Aborts (with a warning) on NULL/empty inputs.
//static
void LLVertexBuffer::drawElements(U32 num_vertices, const LLVector4a* posp,
                                  const LLVector2* tcp, U32 num_indices,
                                  const U16* indicesp)
{
    if (!posp || !indicesp || num_vertices <= 0 || num_indices <= 0)
    {
        llwarns << (posp ? "" : "NULL positions pointer - ")
                << (indicesp ? "" : "NULL indices pointer - ")
                << num_vertices << " vertices - " << num_indices
                << " indices. Aborting." << llendl;
        return;
    }
    gGL.syncMatrices();
    if (num_vertices <= 65536 && num_indices <= 65536 &&
        sUtilityBuffer.notNull())
    {
        // Vertex-buffer based, optimized code
        LLStrider<LLVector4a> vertex_strider;
        LLStrider<U16> index_strider;
        if (!sUtilityBuffer->getVertexStrider(vertex_strider) ||
            !sUtilityBuffer->getIndexStrider(index_strider))
        {
            llwarns_sparse << "Failed to get striders, aborting." << llendl;
            return;
        }
        // Sizes are rounded up to a multiple of 16 bytes, as required by the
        // aligned copy helper below.
        U32 index_size = ((num_indices * sizeof(U16)) + 0xF) & ~0xF;
        LLVector4a::memcpyNonAliased16((F32*)index_strider.get(),
                                       (F32*)indicesp, index_size);
        U32 vertex_size = ((num_vertices * 4 * sizeof(F32)) + 0xF) & ~0xF;
        LLVector4a::memcpyNonAliased16((F32*)vertex_strider.get(), (F32*)posp,
                                       vertex_size);
        U32 mask = LLVertexBuffer::MAP_VERTEX;
        if (tcp)
        {
            // Texture coordinates were provided: copy them as well.
            mask |= LLVertexBuffer::MAP_TEXCOORD0;
            LLStrider<LLVector2> tc_strider;
            if (!sUtilityBuffer->getTexCoord0Strider(tc_strider))
            {
                llwarns_sparse << "Failed to get coord strider, aborting."
                               << llendl;
                return;
            }
            U32 tc_size = ((num_vertices * 2 * sizeof(F32)) + 0xF) & ~0xF;
            LLVector4a::memcpyNonAliased16((F32*)tc_strider.get(), (F32*)tcp,
                                           tc_size);
        }
        sUtilityBuffer->setBuffer(mask);
        sUtilityBuffer->draw(LLRender::TRIANGLES, num_indices, 0);
    }
    else // LL's new but slow code
    {
        unbind();
        gGL.begin(LLRender::TRIANGLES);
        if (tcp)
        {
            for (U32 i = 0; i < num_indices; ++i)
            {
                U16 idx = indicesp[i];
                gGL.texCoord2fv(tcp[idx].mV);
                gGL.vertex3fv(posp[idx].getF32ptr());
            }
        }
        else
        {
            for (U32 i = 0; i < num_indices; ++i)
            {
                U16 idx = indicesp[i];
                gGL.vertex3fv(posp[idx].getF32ptr());
            }
        }
        gGL.end(true);
    }
}
// Sanity-checks a draw range request against this buffer: the vertex range
// must fit within mNumVerts and the index range within mNumIndices. In PBR +
// debug GL mode, also validates the mapped index and vertex data themselves.
// Returns true when everything looks valid.
bool LLVertexBuffer::validateRange(U32 start, U32 end, U32 count,
                                   U32 indices_offset) const
{
    if (start >= mNumVerts || end >= mNumVerts)
    {
        llwarns << "Bad vertex buffer draw range: [" << start << ", " << end
                << "] vs " << mNumVerts << llendl;
        return false;
    }
    if (indices_offset >= mNumIndices || indices_offset + count > mNumIndices)
    {
        llwarns << "Bad index buffer draw range: [" << indices_offset << ", "
                << indices_offset + count << "] vs " << mNumIndices << llendl;
        return false;
    }
    if (gUsePBRShaders && gDebugGL)
    {
        // Verify that every index in the range points inside [start, end].
        U16* idx = (U16*)mMappedIndexData + indices_offset;
        for (U32 i = 0; i < count; ++i)
        {
            if (idx[i] < start || idx[i] > end)
            {
                llwarns << "Index out of range:" << idx[i] << " not in ["
                        << start << ", " << end << "]" << llendl;
                return false;
            }
        }
        // Verify that all referenced vertex positions are finite.
        LLVector4a* v = (LLVector4a*)mMappedData;
        for (U32 i = start; i <= end; ++i)
        {
            if (!v[i].isFinite3())
            {
                llwarns << "Non-finite vertex position data detected."
                        << llendl;
                return false;
            }
        }
        LLGLSLShader* shaderp = LLGLSLShader::sCurBoundShaderPtr;
        if (shaderp && shaderp->mFeatures.mIndexedTextureChannels > 1)
        {
            // The texture index is stored in position.w: check it against the
            // number of indexed texture channels of the bound shader.
            // NOTE(review): this loop uses 'i < end' while the finiteness
            // loop above uses 'i <= end', and it shadows 'v'; likely benign
            // but worth confirming against upstream sources.
            LLVector4a* v = (LLVector4a*)mMappedData;
            for (U32 i = start; i < end; ++i)
            {
                U32 idx = U32(v[i][3] + 0.25f);
                if (idx >= (U32)shaderp->mFeatures.mIndexedTextureChannels)
                {
                    llwarns << "Bad texture index (" << idx
                            << ") found for shader: " << shaderp->mName
                            << ". Max index should be "
                            << shaderp->mFeatures.mIndexedTextureChannels - 1
                            << "." << llendl;
                    return false;
                }
            }
        }
    }
    return true;
}
  821. void LLVertexBuffer::drawRange(U32 mode, U32 start, U32 end, U32 count,
  822. U32 indices_offset) const
  823. {
  824. gGL.syncMatrices();
  825. if (gDebugGL && !gUsePBRShaders)
  826. {
  827. if (!LLGLSLShader::sCurBoundShaderPtr)
  828. {
  829. llwarns << "No bound shader." << llendl;
  830. llassert(false);
  831. }
  832. if (mGLIndices != sGLRenderIndices)
  833. {
  834. llwarns << "Wrong index buffer bound." << llendl;
  835. llassert(false);
  836. }
  837. if (mGLBuffer != sGLRenderBuffer)
  838. {
  839. llwarns << "Wrong vertex buffer bound." << llendl;
  840. llassert(false);
  841. }
  842. if (!validateRange(start, end, count, indices_offset))
  843. {
  844. llwarns << "Check failed." << llendl;
  845. llassert(false);
  846. }
  847. if (!gUsePBRShaders)
  848. {
  849. GLint elem = 0;
  850. glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING, &elem);
  851. if ((U32)elem != mGLIndices)
  852. {
  853. llwarns << "Wrong index buffer bound." << llendl;
  854. llassert(false);
  855. }
  856. }
  857. }
  858. LLGLSLShader::startProfile();
  859. glDrawRangeElements(sGLMode[mode], start, end, count, mIndicesType,
  860. (const void*)(indices_offset * mIndicesStride));
  861. LLGLSLShader::stopProfile();
  862. }
  863. void LLVertexBuffer::drawRangeFast(U32 start, U32 end, U32 count,
  864. U32 indices_offset) const
  865. {
  866. gGL.syncMatrices();
  867. glDrawRangeElements(sGLMode[LLRender::TRIANGLES], start, end, count,
  868. mIndicesType,
  869. (const void*)(indices_offset * mIndicesStride));
  870. }
  871. void LLVertexBuffer::draw(U32 mode, U32 count, U32 indices_offset) const
  872. {
  873. drawRange(mode, 0, mNumVerts - 1, count, indices_offset);
  874. }
  875. void LLVertexBuffer::drawArrays(U32 mode, U32 first, U32 count) const
  876. {
  877. gGL.syncMatrices();
  878. if (gDebugGL && !gUsePBRShaders)
  879. {
  880. if (!LLGLSLShader::sCurBoundShaderPtr)
  881. {
  882. llwarns << "No bound shader" << llendl;
  883. llassert(false);
  884. }
  885. if (first >= mNumVerts || first + count > mNumVerts)
  886. {
  887. llwarns << "Bad vertex buffer draw range: [" << first << ", "
  888. << first + count << "] vs " << mNumVerts << ". Aborted."
  889. << llendl;
  890. llassert(false);
  891. }
  892. if (mGLBuffer != sGLRenderBuffer || !sVBOActive)
  893. {
  894. llwarns << "Wrong vertex buffer bound." << llendl;
  895. llassert(false);
  896. }
  897. }
  898. LLGLSLShader::startProfile();
  899. glDrawArrays(sGLMode[mode], first, count);
  900. LLGLSLShader::stopProfile();
  901. }
  902. //static
  903. void LLVertexBuffer::unbind()
  904. {
  905. if (sVBOActive)
  906. {
  907. glBindBuffer(GL_ARRAY_BUFFER, 0);
  908. sVBOActive = false;
  909. }
  910. if (sIBOActive)
  911. {
  912. glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
  913. sIBOActive = false;
  914. }
  915. sGLRenderBuffer = sGLRenderIndices = 0;
  916. if (!gUsePBRShaders)
  917. {
  918. setupClientArrays(0);
  919. }
  920. }
  921. //static
  922. U32 LLVertexBuffer::calcOffsets(U32 typemask, U32* offsets, U32 num_vertices)
  923. {
  924. U32 offset = 0;
  925. for (U32 i = 0; i < TYPE_TEXTURE_INDEX; ++i)
  926. {
  927. U32 mask = 1 << i;
  928. if (typemask & mask)
  929. {
  930. if (offsets && sTypeSize[i])
  931. {
  932. offsets[i] = offset;
  933. offset += sTypeSize[i] * num_vertices;
  934. offset = (offset + 0xF) & ~0xF;
  935. }
  936. }
  937. }
  938. offsets[TYPE_TEXTURE_INDEX] = offsets[TYPE_VERTEX] + 12;
  939. return offset;
  940. }
  941. //static
  942. U32 LLVertexBuffer::calcVertexSize(U32 typemask)
  943. {
  944. U32 size = 0;
  945. for (U32 i = 0; i < TYPE_TEXTURE_INDEX; ++i)
  946. {
  947. U32 mask = 1 << i;
  948. if (typemask & mask)
  949. {
  950. size += sTypeSize[i];
  951. }
  952. }
  953. return size;
  954. }
  955. void LLVertexBuffer::genBuffer(U32 size)
  956. {
  957. if (sVBOPool)
  958. {
  959. mSize = size;
  960. mMappedData = sVBOPool->allocate(GL_ARRAY_BUFFER, size, mGLBuffer);
  961. ++sGLCount;
  962. }
  963. }
  964. void LLVertexBuffer::genIndices(U32 size)
  965. {
  966. if (sVBOPool)
  967. {
  968. mIndicesSize = size;
  969. mMappedIndexData = sVBOPool->allocate(GL_ELEMENT_ARRAY_BUFFER, size,
  970. mGLIndices);
  971. ++sGLCount;
  972. }
  973. }
  974. bool LLVertexBuffer::createGLBuffer(U32 size)
  975. {
  976. if (mGLBuffer || mMappedData)
  977. {
  978. destroyGLBuffer();
  979. }
  980. if (size == 0)
  981. {
  982. return true;
  983. }
  984. genBuffer(size);
  985. return mMappedData != NULL;
  986. }
  987. bool LLVertexBuffer::createGLIndices(U32 size)
  988. {
  989. if (mGLIndices)
  990. {
  991. destroyGLIndices();
  992. }
  993. if (size == 0)
  994. {
  995. return true;
  996. }
  997. genIndices(size);
  998. return mMappedIndexData != NULL;
  999. }
  1000. void LLVertexBuffer::destroyGLBuffer()
  1001. {
  1002. if (mGLBuffer || mMappedData)
  1003. {
  1004. if (sVBOPool)
  1005. {
  1006. sVBOPool->free(GL_ARRAY_BUFFER, mSize, mGLBuffer, mMappedData);
  1007. }
  1008. mSize = 0;
  1009. mGLBuffer = 0;
  1010. mMappedData = NULL;
  1011. --sGLCount;
  1012. }
  1013. }
  1014. void LLVertexBuffer::destroyGLIndices()
  1015. {
  1016. if (mGLIndices || mMappedIndexData)
  1017. {
  1018. if (sVBOPool)
  1019. {
  1020. sVBOPool->free(GL_ELEMENT_ARRAY_BUFFER, mIndicesSize, mGLIndices,
  1021. mMappedIndexData);
  1022. }
  1023. mIndicesSize = 0;
  1024. mGLIndices = 0;
  1025. mMappedIndexData = NULL;
  1026. --sGLCount;
  1027. }
  1028. }
  1029. bool LLVertexBuffer::updateNumVerts(U32 nverts)
  1030. {
  1031. bool success = true;
  1032. if (nverts > 65536)
  1033. {
  1034. llwarns << "Vertex buffer overflow !" << llendl;
  1035. nverts = 65536;
  1036. }
  1037. U32 needed_size = calcOffsets(mTypeMask, mOffsets, nverts);
  1038. if (needed_size != mSize)
  1039. {
  1040. success = createGLBuffer(needed_size);
  1041. }
  1042. sVertexCount -= mNumVerts;
  1043. mNumVerts = nverts;
  1044. sVertexCount += mNumVerts;
  1045. return success;
  1046. }
  1047. bool LLVertexBuffer::updateNumIndices(U32 nindices)
  1048. {
  1049. bool success = true;
  1050. U32 needed_size = sizeof(U16) * nindices;
  1051. if (needed_size != mIndicesSize)
  1052. {
  1053. success = createGLIndices(needed_size);
  1054. }
  1055. sIndexCount -= mNumIndices;
  1056. mNumIndices = nindices;
  1057. sIndexCount += mNumIndices;
  1058. return success;
  1059. }
  1060. bool LLVertexBuffer::allocateBuffer(U32 nverts, U32 nindices)
  1061. {
  1062. if (nverts > 65536)
  1063. {
  1064. llerrs << "To many vertices: " << nverts << llendl;
  1065. }
  1066. bool success = updateNumVerts(nverts);
  1067. success &= updateNumIndices(nindices);
  1068. if (success && !gUsePBRShaders && (nverts || nindices))
  1069. {
  1070. unmapBuffer();
  1071. }
  1072. return success;
  1073. }
  1074. static bool expand_region(LLVertexBuffer::MappedRegion& region, U32 start,
  1075. U32 end)
  1076. {
  1077. if (end < region.mStart || start > region.mEnd)
  1078. {
  1079. // There is a gap, do not merge
  1080. return false;
  1081. }
  1082. region.mStart = llmin(region.mStart, start);
  1083. region.mEnd = llmax(region.mEnd, end);
  1084. return true;
  1085. }
  1086. // Map for data access
  1087. U8* LLVertexBuffer::mapVertexBuffer(S32 type, U32 index, S32 count)
  1088. {
  1089. if (!mCachedBuffer && !gUsePBRShaders)
  1090. {
  1091. bindGLBuffer(true);
  1092. }
  1093. if (count == -1)
  1094. {
  1095. count = mNumVerts - index;
  1096. }
  1097. U32 start = mOffsets[type] + sTypeSize[type] * index;
  1098. U32 end = start + sTypeSize[type] * count - 1;
  1099. bool mapped = false;
  1100. // Flag region as mapped
  1101. for (U32 i = 0, count = mMappedVertexRegions.size(); i < count; ++i)
  1102. {
  1103. MappedRegion& region = mMappedVertexRegions[i];
  1104. if (expand_region(region, start, end))
  1105. {
  1106. mapped = true;
  1107. break;
  1108. }
  1109. }
  1110. if (!mapped)
  1111. {
  1112. // Not already mapped, map new region
  1113. mMappedVertexRegions.emplace_back(start, end);
  1114. }
  1115. return mMappedData + mOffsets[type] + sTypeSize[type] * index;
  1116. }
  1117. U8* LLVertexBuffer::mapIndexBuffer(U32 index, S32 count)
  1118. {
  1119. bindGLIndices(!mCachedBuffer);
  1120. if (count == -1)
  1121. {
  1122. count = mNumIndices - index;
  1123. }
  1124. U32 start = sizeof(U16) * index;
  1125. U32 end = start + sizeof(U16) * count - 1;
  1126. bool mapped = false;
  1127. // See if range is already mapped
  1128. for (U32 i = 0; i < mMappedIndexRegions.size(); ++i)
  1129. {
  1130. MappedRegion& region = mMappedIndexRegions[i];
  1131. if (expand_region(region, start, end))
  1132. {
  1133. mapped = true;
  1134. break;
  1135. }
  1136. }
  1137. if (!mapped)
  1138. {
  1139. // Not already mapped, map new region
  1140. mMappedIndexRegions.emplace_back(start, end);
  1141. }
  1142. return mMappedIndexData + sizeof(U16) * index;
  1143. }
  1144. struct SortMappedRegion
  1145. {
  1146. LL_INLINE bool operator()(const LLVertexBuffer::MappedRegion& lhs,
  1147. const LLVertexBuffer::MappedRegion& rhs)
  1148. {
  1149. return lhs.mStart < rhs.mStart;
  1150. }
  1151. };
// Flushes all pending mapped vertex and index regions to their GL buffers.
// Regions are sorted by start offset and contiguous ones are coalesced, so as
// to minimize the number of flush_vbo() calls.
void LLVertexBuffer::unmapBuffer()
{
	if (mMappedData && !mMappedVertexRegions.empty())
	{
		LL_TRACY_TIMER(TRC_VBO_UNMAP);
		// Make sure our vertex buffer is the currently bound one.
		if (mGLBuffer != sGLRenderBuffer)
		{
			glBindBuffer(GL_ARRAY_BUFFER, mGLBuffer);
			sGLRenderBuffer = mGLBuffer;
		}
		U32 start = 0;
		U32 end = 0;
		std::sort(mMappedVertexRegions.begin(), mMappedVertexRegions.end(),
				  SortMappedRegion());
		// Walk the sorted regions, extending the current [start, end] span
		// with each region that starts right after it, and flushing the
		// accumulated span whenever a gap is found.
		for (U32 i = 0, count = mMappedVertexRegions.size(); i < count; ++i)
		{
			const MappedRegion& region = mMappedVertexRegions[i];
			if (region.mStart == end + 1)
			{
				end = region.mEnd;
			}
			else
			{
				flush_vbo(GL_ARRAY_BUFFER, start, end,
						  (U8*)mMappedData + start);
				start = region.mStart;
				end = region.mEnd;
			}
		}
		// Flush the last pending span.
		flush_vbo(GL_ARRAY_BUFFER, start, end, (U8*)mMappedData + start);
		stop_glerror();
		mMappedVertexRegions.clear();
	}
	if (mMappedIndexData && !mMappedIndexRegions.empty())
	{
		LL_TRACY_TIMER(TRC_IBO_UNMAP);
		// Make sure our index buffer is the currently bound one.
		if (mGLIndices != sGLRenderIndices)
		{
			glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mGLIndices);
			sGLRenderIndices = mGLIndices;
		}
		U32 start = 0;
		U32 end = 0;
		std::sort(mMappedIndexRegions.begin(), mMappedIndexRegions.end(),
				  SortMappedRegion());
		// Same coalescing logic as for the vertex regions above.
		for (U32 i = 0, count = mMappedIndexRegions.size(); i < count; ++i)
		{
			const MappedRegion& region = mMappedIndexRegions[i];
			if (region.mStart == end + 1)
			{
				end = region.mEnd;
			}
			else
			{
				flush_vbo(GL_ELEMENT_ARRAY_BUFFER, start, end,
						  (U8*)mMappedIndexData + start);
				start = region.mStart;
				end = region.mEnd;
			}
		}
		// Flush the last pending span.
		flush_vbo(GL_ELEMENT_ARRAY_BUFFER, start, end,
				  (U8*)mMappedIndexData + start);
		stop_glerror();
		mMappedIndexRegions.clear();
	}
}
  1218. void LLVertexBuffer::resetVertexData()
  1219. {
  1220. if (mMappedData && mSize)
  1221. {
  1222. memset((void*)mMappedData, 0, mSize);
  1223. }
  1224. }
  1225. void LLVertexBuffer::resetIndexData()
  1226. {
  1227. if (mMappedIndexData && mIndicesSize)
  1228. {
  1229. memset((void*)mMappedIndexData, 0, mIndicesSize);
  1230. }
  1231. }
  1232. template <class T, S32 type>
  1233. class VertexBufferStrider
  1234. {
  1235. protected:
  1236. LOG_CLASS(VertexBufferStrider);
  1237. public:
  1238. typedef LLStrider<T> strider_t;
  1239. static bool get(LLVertexBuffer& vbo, strider_t& strider, U32 index,
  1240. S32 count)
  1241. {
  1242. if (type == LLVertexBuffer::TYPE_INDEX)
  1243. {
  1244. U8* ptr = vbo.mapIndexBuffer(index, count);
  1245. strider = (T*)ptr;
  1246. if (!ptr)
  1247. {
  1248. llwarns << "mapIndexBuffer() failed !" << llendl;
  1249. return false;
  1250. }
  1251. strider.setStride(0);
  1252. return true;
  1253. }
  1254. else if (vbo.hasDataType(type))
  1255. {
  1256. U8* ptr = vbo.mapVertexBuffer(type, index, count);
  1257. strider = (T*)ptr;
  1258. if (!ptr)
  1259. {
  1260. llwarns << "mapVertexBuffer() failed !" << llendl;
  1261. return false;
  1262. }
  1263. strider.setStride(LLVertexBuffer::sTypeSize[type]);
  1264. return true;
  1265. }
  1266. llwarns << "Could not find valid vertex data." << llendl;
  1267. return false;
  1268. }
  1269. };
  1270. bool LLVertexBuffer::getVertexStrider(LLStrider<LLVector3>& strider,
  1271. U32 index, S32 count)
  1272. {
  1273. return VertexBufferStrider<LLVector3, TYPE_VERTEX>::get(*this, strider,
  1274. index, count);
  1275. }
  1276. bool LLVertexBuffer::getVertexStrider(LLStrider<LLVector4a>& strider,
  1277. U32 index, S32 count)
  1278. {
  1279. return VertexBufferStrider<LLVector4a, TYPE_VERTEX>::get(*this, strider,
  1280. index, count);
  1281. }
  1282. bool LLVertexBuffer::getIndexStrider(LLStrider<U16>& strider,
  1283. U32 index, S32 count)
  1284. {
  1285. if (mIndicesType != GL_UNSIGNED_SHORT)
  1286. {
  1287. llassert(false);
  1288. return false; // Cannot access 32 bits indices with U16 strider...
  1289. }
  1290. return VertexBufferStrider<U16, TYPE_INDEX>::get(*this, strider, index,
  1291. count);
  1292. }
  1293. bool LLVertexBuffer::getTexCoord0Strider(LLStrider<LLVector2>& strider,
  1294. U32 index, S32 count)
  1295. {
  1296. return VertexBufferStrider<LLVector2, TYPE_TEXCOORD0>::get(*this, strider,
  1297. index, count);
  1298. }
  1299. bool LLVertexBuffer::getTexCoord1Strider(LLStrider<LLVector2>& strider,
  1300. U32 index, S32 count)
  1301. {
  1302. return VertexBufferStrider<LLVector2, TYPE_TEXCOORD1>::get(*this, strider,
  1303. index, count);
  1304. }
  1305. bool LLVertexBuffer::getTexCoord2Strider(LLStrider<LLVector2>& strider,
  1306. U32 index, S32 count)
  1307. {
  1308. return VertexBufferStrider<LLVector2, TYPE_TEXCOORD2>::get(*this, strider,
  1309. index, count);
  1310. }
  1311. bool LLVertexBuffer::getNormalStrider(LLStrider<LLVector3>& strider,
  1312. U32 index, S32 count)
  1313. {
  1314. return VertexBufferStrider<LLVector3, TYPE_NORMAL>::get(*this, strider,
  1315. index, count);
  1316. }
  1317. bool LLVertexBuffer::getNormalStrider(LLStrider<LLVector4a>& strider,
  1318. U32 index, S32 count)
  1319. {
  1320. return VertexBufferStrider<LLVector4a, TYPE_NORMAL>::get(*this, strider,
  1321. index, count);
  1322. }
  1323. bool LLVertexBuffer::getTangentStrider(LLStrider<LLVector3>& strider,
  1324. U32 index, S32 count)
  1325. {
  1326. return VertexBufferStrider<LLVector3, TYPE_TANGENT>::get(*this, strider,
  1327. index, count);
  1328. }
  1329. bool LLVertexBuffer::getTangentStrider(LLStrider<LLVector4a>& strider,
  1330. U32 index, S32 count)
  1331. {
  1332. return VertexBufferStrider<LLVector4a, TYPE_TANGENT>::get(*this, strider,
  1333. index, count);
  1334. }
  1335. bool LLVertexBuffer::getColorStrider(LLStrider<LLColor4U>& strider,
  1336. U32 index, S32 count)
  1337. {
  1338. return VertexBufferStrider<LLColor4U, TYPE_COLOR>::get(*this, strider,
  1339. index, count);
  1340. }
  1341. bool LLVertexBuffer::getEmissiveStrider(LLStrider<LLColor4U>& strider,
  1342. U32 index, S32 count)
  1343. {
  1344. return VertexBufferStrider<LLColor4U, TYPE_EMISSIVE>::get(*this, strider,
  1345. index, count);
  1346. }
  1347. bool LLVertexBuffer::getWeightStrider(LLStrider<F32>& strider,
  1348. U32 index, S32 count)
  1349. {
  1350. return VertexBufferStrider<F32, TYPE_WEIGHT>::get(*this, strider, index,
  1351. count);
  1352. }
  1353. bool LLVertexBuffer::getWeight4Strider(LLStrider<LLVector4a>& strider,
  1354. U32 index, S32 count)
  1355. {
  1356. return VertexBufferStrider<LLVector4a, TYPE_WEIGHT4>::get(*this, strider,
  1357. index, count);
  1358. }
  1359. bool LLVertexBuffer::getClothWeightStrider(LLStrider<LLVector4a>& strider,
  1360. U32 index, S32 count)
  1361. {
  1362. return VertexBufferStrider<LLVector4a, TYPE_CLOTHWEIGHT>::get(*this,
  1363. strider,
  1364. index,
  1365. count);
  1366. }
  1367. bool LLVertexBuffer::bindGLBuffer(bool force_bind)
  1368. {
  1369. if (mGLBuffer &&
  1370. (force_bind || (mGLBuffer != sGLRenderBuffer || !sVBOActive)))
  1371. {
  1372. LL_TRACY_TIMER(TRC_BIND_GL_BUFFER);
  1373. glBindBuffer(GL_ARRAY_BUFFER, mGLBuffer);
  1374. sGLRenderBuffer = mGLBuffer;
  1375. ++sBindCount;
  1376. sVBOActive = true;
  1377. return true;
  1378. }
  1379. return false;
  1380. }
  1381. bool LLVertexBuffer::bindGLBufferFast()
  1382. {
  1383. if (mGLBuffer != sGLRenderBuffer || !sVBOActive)
  1384. {
  1385. glBindBuffer(GL_ARRAY_BUFFER, mGLBuffer);
  1386. sGLRenderBuffer = mGLBuffer;
  1387. ++sBindCount;
  1388. sVBOActive = true;
  1389. return true;
  1390. }
  1391. return false;
  1392. }
  1393. bool LLVertexBuffer::bindGLIndices(bool force_bind)
  1394. {
  1395. if (mGLIndices &&
  1396. (force_bind || (mGLIndices != sGLRenderIndices || !sIBOActive)))
  1397. {
  1398. LL_TRACY_TIMER(TRC_BIND_GL_INDICES);
  1399. glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mGLIndices);
  1400. sGLRenderIndices = mGLIndices;
  1401. stop_glerror();
  1402. ++sBindCount;
  1403. sIBOActive = true;
  1404. return true;
  1405. }
  1406. return false;
  1407. }
  1408. bool LLVertexBuffer::bindGLIndicesFast()
  1409. {
  1410. if (mGLIndices != sGLRenderIndices || !sIBOActive)
  1411. {
  1412. glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mGLIndices);
  1413. sGLRenderIndices = mGLIndices;
  1414. ++sBindCount;
  1415. sIBOActive = true;
  1416. return true;
  1417. }
  1418. return false;
  1419. }
  1420. //static
  1421. std::string LLVertexBuffer::listMissingBits(U32 unsatisfied_mask)
  1422. {
  1423. std::string report;
  1424. if (unsatisfied_mask & MAP_VERTEX)
  1425. {
  1426. report = "\n - Missing vert pos";
  1427. }
  1428. if (unsatisfied_mask & MAP_NORMAL)
  1429. {
  1430. report += "\n - Missing normals";
  1431. }
  1432. if (unsatisfied_mask & MAP_TEXCOORD0)
  1433. {
  1434. report += "\n - Missing tex coord 0";
  1435. }
  1436. if (unsatisfied_mask & MAP_TEXCOORD1)
  1437. {
  1438. report += "\n - Missing tex coord 1";
  1439. }
  1440. if (unsatisfied_mask & MAP_TEXCOORD2)
  1441. {
  1442. report += "\n - Missing tex coord 2";
  1443. }
  1444. if (unsatisfied_mask & MAP_TEXCOORD3)
  1445. {
  1446. report += "\n - Missing tex coord 3";
  1447. }
  1448. if (unsatisfied_mask & MAP_COLOR)
  1449. {
  1450. report += "\n - Missing vert color";
  1451. }
  1452. if (unsatisfied_mask & MAP_EMISSIVE)
  1453. {
  1454. report += "\n - Missing emissive";
  1455. }
  1456. if (unsatisfied_mask & MAP_TANGENT)
  1457. {
  1458. report += "\n - Missing tangent";
  1459. }
  1460. if (unsatisfied_mask & MAP_WEIGHT)
  1461. {
  1462. report += "\n - Missing weight";
  1463. }
  1464. if (unsatisfied_mask & MAP_WEIGHT4)
  1465. {
  1466. report += "\n - Missing weight4";
  1467. }
  1468. if (unsatisfied_mask & MAP_CLOTHWEIGHT)
  1469. {
  1470. report += "\n - Missing cloth weight";
  1471. }
  1472. if (unsatisfied_mask & MAP_TEXTURE_INDEX)
  1473. {
  1474. report += "\n - Missing tex index";
  1475. }
  1476. if (unsatisfied_mask & TYPE_INDEX)
  1477. {
  1478. report += "\n - Missing indices";
  1479. }
  1480. return report;
  1481. }
// Set for rendering. For the legacy EE renderer only.
// Flushes pending mapped data, binds the vertex/index buffers, and (re)sets
// the client arrays and attribute pointers when the data mask changed or a
// new bind occurred. In debug mode, also cross-checks the data mask against
// the bound shader's attribute requirements and GL's own binding state.
void LLVertexBuffer::setBuffer(U32 data_mask)
{
	// *HACK: in order to simplify the dual-renderer code and reduce the number
	// of tests in it...
	if (gUsePBRShaders)
	{
		setBuffer();
		return;
	}
	// Flush any pending mapped regions before rendering.
	unmapBuffer();
	// Set up pointers if the data mask is different ...
	bool setup = sLastMask != data_mask;
	if (data_mask && gDebugGL)
	{
		// Make sure data requirements are fulfilled
		LLGLSLShader* shader = LLGLSLShader::sCurBoundShaderPtr;
		if (shader)
		{
			// Collect the mask of all attributes the shader consumes.
			U32 required_mask = 0;
			for (U32 i = 0; i < LLVertexBuffer::TYPE_TEXTURE_INDEX; ++i)
			{
				if (shader->getAttribLocation(i) > -1)
				{
					U32 required = 1 << i;
					if ((data_mask & required) == 0)
					{
						llwarns << "Missing attribute: "
								<< LLShaderMgr::sReservedAttribs[i] << llendl;
					}
					required_mask |= required;
				}
			}
			// Report everything the shader needs but the buffer lacks.
			U32 unsatisfied_mask = required_mask & ~data_mask;
			if (unsatisfied_mask)
			{
				llwarns << "Shader consumption mismatches data provision:"
						<< listMissingBits(unsatisfied_mask) << llendl;
			}
		}
	}
	// A new bind also requires a pointers setup.
	bool bind_buffer = mGLBuffer && bindGLBufferFast();
	bool bind_indices = mGLIndices && bindGLIndicesFast();
	setup |= bind_buffer || bind_indices;
	if (gDebugGL)
	{
		// Verify that GL's idea of the bound buffers matches ours.
		GLint buff;
		glGetIntegerv(GL_ARRAY_BUFFER_BINDING, &buff);
		if ((U32)buff != mGLBuffer)
		{
			llwarns_once << "Invalid GL vertex buffer bound: " << buff
						 << " - Expected: " << mGLBuffer << llendl;
		}
		if (mGLIndices)
		{
			glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING, &buff);
			if ((U32)buff != mGLIndices)
			{
				llerrs << "Invalid GL index buffer bound: " << buff << llendl;
			}
		}
	}
	setupClientArrays(data_mask);
	if (setup && data_mask && mGLBuffer)
	{
		setupVertexBuffer(data_mask);
	}
}
  1550. // Set fast for rendering. For the legacy EE renderer only.
  1551. void LLVertexBuffer::setBufferFast(U32 data_mask)
  1552. {
  1553. // *HACK: in order to simplify the dual-renderer code and reduce the number
  1554. // of tests in it...
  1555. if (gUsePBRShaders)
  1556. {
  1557. setBuffer();
  1558. return;
  1559. }
  1560. // Set up pointers if the data mask is different ...
  1561. bool setup = sLastMask != data_mask;
  1562. bool bind_buffer = bindGLBufferFast();
  1563. bool bind_indices = bindGLIndicesFast();
  1564. setup = setup || bind_buffer || bind_indices;
  1565. setupClientArrays(data_mask);
  1566. if (data_mask && setup)
  1567. {
  1568. setupVertexBuffer(data_mask);
  1569. }
  1570. }
  1571. // New method used by the PBR renderer
  1572. void LLVertexBuffer::setBuffer()
  1573. {
  1574. LLGLSLShader* shaderp = LLGLSLShader::sCurBoundShaderPtr;
  1575. if (!shaderp)
  1576. {
  1577. // Issuing a simple warning and returning at this point would cause a
  1578. // crash later on; so just crash now, in a "clean" way and with a
  1579. // prominent error message (most likely, a shader failed to load). HB
  1580. llerrs << "No bound shader !" << llendl;
  1581. }
  1582. U32 data_mask = shaderp->mAttributeMask;
  1583. if (gDebugGL)
  1584. {
  1585. if (!mMappedVertexRegions.empty() || !mMappedIndexRegions.empty())
  1586. {
  1587. llwarns << "Data was pending on this buffer" << llendl;
  1588. }
  1589. if ((data_mask & mTypeMask) != data_mask)
  1590. {
  1591. llwarns << "Masks mismatch: shader mask = " << std::hex
  1592. << data_mask << " - VB mask = " << mTypeMask << std::dec
  1593. << llendl;
  1594. }
  1595. }
  1596. if (sGLRenderBuffer != mGLBuffer)
  1597. {
  1598. glBindBuffer(GL_ARRAY_BUFFER, mGLBuffer);
  1599. sGLRenderBuffer = mGLBuffer;
  1600. setupVertexBuffer(data_mask);
  1601. }
  1602. else if (sLastMask != data_mask)
  1603. {
  1604. setupVertexBuffer(data_mask);
  1605. sLastMask = data_mask;
  1606. }
  1607. if (mGLIndices != sGLRenderIndices)
  1608. {
  1609. glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mGLIndices);
  1610. sGLRenderIndices = mGLIndices;
  1611. }
  1612. }
  1613. // Only to be used for external (non rendering) purpose, such as with GLOD. HB
  1614. void LLVertexBuffer::setBufferNoShader(U32 data_mask)
  1615. {
  1616. llassert_always(!LLGLSLShader::sCurBoundShaderPtr);
  1617. unmapBuffer();
  1618. bool setup = sLastMask != data_mask;
  1619. bool bind_buffer = mGLBuffer && bindGLBufferFast();
  1620. bool bind_indices = mGLIndices && bindGLIndicesFast();
  1621. setup |= bind_buffer || bind_indices;
  1622. setupClientArrays(data_mask);
  1623. if (setup && data_mask && mGLBuffer)
  1624. {
  1625. setupVertexBuffer(data_mask);
  1626. }
  1627. }
//virtual
// Sets the GL vertex attribute pointers for every attribute present in
// 'data_mask', using the per-attribute byte offsets computed by
// calcOffsets(). The buffer must already be bound.
void LLVertexBuffer::setupVertexBuffer(U32 data_mask)
{
	if (!gUsePBRShaders)
	{
		// NOTE(review): 'mTypeMaskMask' appears to hold attribute bits to be
		// excluded for the legacy EE renderer — confirm its exact semantics
		// against the class header.
		data_mask &= ~mTypeMaskMask;
	}
	if (gDebugGL && !gUsePBRShaders && (data_mask & mTypeMask) != data_mask)
	{
		// Report every attribute requested in data_mask but absent from this
		// buffer's type mask.
		for (U32 i = 0; i < LLVertexBuffer::TYPE_MAX; ++i)
		{
			U32 mask = 1 << i;
			if (mask & data_mask && !(mask & mTypeMask))
			{
				// Bit set in data_mask, but not set in mTypeMask
				llwarns << "Missing required component " << vb_type_name[i]
						<< llendl;
			}
		}
		llassert(false);
	}
	void* ptr;
	// NOTE: the 'loc' variable is *required* to pass as reference (passing
	// TYPE_* directly to glVertexAttribPointer() causes a crash), unlike
	// the OpenGL documentation prototype for this function... Go figure ! HB
	GLint loc;
	if (data_mask & MAP_NORMAL)
	{
		loc = TYPE_NORMAL;
		ptr = reinterpret_cast<void*>(mOffsets[TYPE_NORMAL]);
		glVertexAttribPointer(loc, 3, GL_FLOAT, GL_FALSE, sizeof(LLVector4),
							  ptr);
	}
	if (data_mask & MAP_TEXCOORD3)
	{
		loc = TYPE_TEXCOORD3;
		ptr = reinterpret_cast<void*>(mOffsets[TYPE_TEXCOORD3]);
		glVertexAttribPointer(loc, 2, GL_FLOAT, GL_FALSE, sizeof(LLVector2),
							  ptr);
	}
	if (data_mask & MAP_TEXCOORD2)
	{
		loc = TYPE_TEXCOORD2;
		ptr = reinterpret_cast<void*>(mOffsets[TYPE_TEXCOORD2]);
		glVertexAttribPointer(loc, 2, GL_FLOAT, GL_FALSE, sizeof(LLVector2),
							  ptr);
	}
	if (data_mask & MAP_TEXCOORD1)
	{
		loc = TYPE_TEXCOORD1;
		ptr = reinterpret_cast<void*>(mOffsets[TYPE_TEXCOORD1]);
		glVertexAttribPointer(loc, 2, GL_FLOAT, GL_FALSE, sizeof(LLVector2),
							  ptr);
	}
	if (data_mask & MAP_TANGENT)
	{
		loc = TYPE_TANGENT;
		ptr = reinterpret_cast<void*>(mOffsets[TYPE_TANGENT]);
		glVertexAttribPointer(loc, 4, GL_FLOAT, GL_FALSE, sizeof(LLVector4),
							  ptr);
	}
	if (data_mask & MAP_TEXCOORD0)
	{
		loc = TYPE_TEXCOORD0;
		ptr = reinterpret_cast<void*>(mOffsets[TYPE_TEXCOORD0]);
		glVertexAttribPointer(loc, 2, GL_FLOAT, GL_FALSE, sizeof(LLVector2),
							  ptr);
	}
	if (data_mask & MAP_COLOR)
	{
		loc = TYPE_COLOR;
		// Bind emissive instead of color pointer if emissive is present
		if (data_mask & MAP_EMISSIVE)
		{
			ptr = reinterpret_cast<void*>(mOffsets[TYPE_EMISSIVE]);
		}
		else
		{
			ptr = reinterpret_cast<void*>(mOffsets[TYPE_COLOR]);
		}
		// Note: sTypeSize[TYPE_COLOR] == sTypeSize[TYPE_EMISSIVE]. HB
		glVertexAttribPointer(loc, 4, GL_UNSIGNED_BYTE, GL_TRUE,
							  sizeof(LLColor4U), ptr);
	}
	if (data_mask & MAP_EMISSIVE)
	{
		loc = TYPE_EMISSIVE;
		ptr = reinterpret_cast<void*>(mOffsets[TYPE_EMISSIVE]);
		glVertexAttribPointer(loc, 4, GL_UNSIGNED_BYTE, GL_TRUE,
							  sizeof(LLColor4U), ptr);
		if (!(data_mask & MAP_COLOR))
		{
			// Map emissive to color channel when color is not also being bound
			// to avoid unnecessary shader swaps
			loc = TYPE_COLOR;
			glVertexAttribPointer(loc, 4, GL_UNSIGNED_BYTE, GL_TRUE,
								  sizeof(LLColor4U), ptr);
		}
	}
	if (data_mask & MAP_WEIGHT)
	{
		loc = TYPE_WEIGHT;
		ptr = reinterpret_cast<void*>(mOffsets[TYPE_WEIGHT]);
		glVertexAttribPointer(loc, 1, GL_FLOAT, GL_FALSE, sizeof(F32), ptr);
	}
	if (data_mask & MAP_WEIGHT4)
	{
		loc = TYPE_WEIGHT4;
		ptr = reinterpret_cast<void*>(mOffsets[TYPE_WEIGHT4]);
		glVertexAttribPointer(loc, 4, GL_FLOAT, GL_FALSE, sizeof(LLVector4),
							  ptr);
	}
	if (data_mask & MAP_JOINT)
	{
		loc = TYPE_JOINT;
		ptr = reinterpret_cast<void*>(mOffsets[TYPE_JOINT]);
		// Joints are packed as four 16 bits unsigned integers.
		glVertexAttribIPointer(loc, 4, GL_UNSIGNED_SHORT, sizeof(U64), ptr);
	}
	if (data_mask & MAP_CLOTHWEIGHT)
	{
		loc = TYPE_CLOTHWEIGHT;
		ptr = reinterpret_cast<void*>(mOffsets[TYPE_CLOTHWEIGHT]);
		glVertexAttribPointer(loc, 4, GL_FLOAT, GL_TRUE, sizeof(LLVector4),
							  ptr);
	}
	if (data_mask & MAP_TEXTURE_INDEX && gGLManager.mHasVertexAttribIPointer)
	{
		loc = TYPE_TEXTURE_INDEX;
		// The texture index shares the W component of the position.
		ptr = reinterpret_cast<void*>(mOffsets[TYPE_VERTEX] + 12);
		glVertexAttribIPointer(loc, 1, GL_UNSIGNED_INT, sizeof(LLVector4),
							   ptr);
	}
	if (data_mask & MAP_VERTEX)
	{
		loc = TYPE_VERTEX;
		ptr = reinterpret_cast<void*>(mOffsets[TYPE_VERTEX]);
		glVertexAttribPointer(loc, 3, GL_FLOAT, GL_FALSE, sizeof(LLVector4),
							  ptr);
	}
	stop_glerror();
	++sSetCount;
}
  1770. void LLVertexBuffer::setPositionData(const LLVector4a* data)
  1771. {
  1772. mCachedBuffer = true;
  1773. if (!gUsePBRShaders)
  1774. {
  1775. bindGLBuffer();
  1776. }
  1777. flush_vbo(GL_ARRAY_BUFFER, 0, mNumVerts * sizeof(LLVector4a) - 1, (U8*)data);
  1778. }
  1779. void LLVertexBuffer::setTexCoord0Data(const LLVector2* data)
  1780. {
  1781. if (!gUsePBRShaders)
  1782. {
  1783. bindGLBuffer();
  1784. }
  1785. U32 start = mOffsets[TYPE_TEXCOORD0];
  1786. flush_vbo(GL_ARRAY_BUFFER, start,
  1787. start + mNumVerts * sizeof(LLVector2) - 1, (U8*)data);
  1788. }
  1789. void LLVertexBuffer::setTexCoord1Data(const LLVector2* data)
  1790. {
  1791. if (!gUsePBRShaders)
  1792. {
  1793. bindGLBuffer();
  1794. }
  1795. U32 start = mOffsets[TYPE_TEXCOORD1];
  1796. flush_vbo(GL_ARRAY_BUFFER, start,
  1797. start + mNumVerts * sizeof(LLVector2) - 1, (U8*)data);
  1798. }
  1799. void LLVertexBuffer::setColorData(const LLColor4U* data)
  1800. {
  1801. if (!gUsePBRShaders)
  1802. {
  1803. bindGLBuffer();
  1804. }
  1805. U32 start = mOffsets[TYPE_COLOR];
  1806. flush_vbo(GL_ARRAY_BUFFER, start,
  1807. start + mNumVerts * sizeof(LLColor4U) - 1, (U8*)data);
  1808. }
  1809. // For use by the llgltf library
  1810. void LLVertexBuffer::setIndexData(const U16* data)
  1811. {
  1812. flush_vbo(GL_ELEMENT_ARRAY_BUFFER, 0, sizeof(U16) * mNumIndices - 1,
  1813. (U8*)data);
  1814. }
  1815. void LLVertexBuffer::setIndexData(const U32* data)
  1816. {
  1817. if (mIndicesType != GL_UNSIGNED_INT)
  1818. {
  1819. // *HACK: vertex buffers are initialized as 16 bits indices, but can be
  1820. // switched to 32 bits indices.
  1821. mIndicesType = GL_UNSIGNED_INT;
  1822. mIndicesStride = sizeof(U32);
  1823. mNumIndices /= 2;
  1824. }
  1825. flush_vbo(GL_ELEMENT_ARRAY_BUFFER, 0, sizeof(U32) * mNumIndices - 1,
  1826. (U8*)data);
  1827. }
  1828. void LLVertexBuffer::setNormalData(const LLVector4a* data)
  1829. {
  1830. U32 start = mOffsets[TYPE_NORMAL];
  1831. flush_vbo(GL_ARRAY_BUFFER, start,
  1832. start + sizeof(LLVector4) * mNumVerts - 1, (U8*)data);
  1833. }
  1834. void LLVertexBuffer::setTangentData(const LLVector4a* data)
  1835. {
  1836. U32 start = mOffsets[TYPE_TANGENT];
  1837. flush_vbo(GL_ARRAY_BUFFER, start,
  1838. start + sizeof(LLVector4) * mNumVerts - 1, (U8*)data);
  1839. }
  1840. void LLVertexBuffer::setWeight4Data(const LLVector4a* data)
  1841. {
  1842. U32 start = mOffsets[TYPE_WEIGHT4];
  1843. flush_vbo(GL_ARRAY_BUFFER, start,
  1844. start + sizeof(LLVector4) * mNumVerts - 1, (U8*)data);
  1845. }
  1846. void LLVertexBuffer::setJointData(const U64* data)
  1847. {
  1848. U32 start = mOffsets[TYPE_JOINT];
  1849. flush_vbo(GL_ARRAY_BUFFER, start, start + sizeof(U64) * mNumVerts - 1,
  1850. (U8*)data);
  1851. }
  1852. void LLVertexBuffer::setPositionData(const LLVector4a* data, U32 offset, U32 count)
  1853. {
  1854. constexpr U32 size = (U32)sizeof(LLVector4a);
  1855. flush_vbo(GL_ARRAY_BUFFER, offset * size, (offset + count) * size - 1,
  1856. (U8*)data);
  1857. }
  1858. void LLVertexBuffer::setTexCoord0Data(const LLVector2* data, U32 offset,
  1859. U32 count)
  1860. {
  1861. constexpr U32 size = (U32)sizeof(LLVector2);
  1862. U32 start = mOffsets[TYPE_TEXCOORD0];
  1863. flush_vbo(GL_ARRAY_BUFFER,
  1864. start + offset * size, start + (offset + count) * size - 1,
  1865. (U8*)data);
  1866. }
  1867. void LLVertexBuffer::setTexCoord1Data(const LLVector2* data, U32 offset,
  1868. U32 count)
  1869. {
  1870. constexpr U32 size = (U32)sizeof(LLVector2);
  1871. U32 start = mOffsets[TYPE_TEXCOORD1];
  1872. flush_vbo(GL_ARRAY_BUFFER,
  1873. start + offset * size, start + (offset + count) * size - 1,
  1874. (U8*)data);
  1875. }
  1876. void LLVertexBuffer::setColorData(const LLColor4U* data, U32 offset, U32 count)
  1877. {
  1878. constexpr U32 size = (U32)sizeof(LLColor4U);
  1879. U32 start = mOffsets[TYPE_COLOR];
  1880. flush_vbo(GL_ARRAY_BUFFER,
  1881. start + offset * size, start + (offset + count) * size - 1,
  1882. (U8*)data);
  1883. }
  1884. void LLVertexBuffer::setIndexData(const U16* data, U32 offset, U32 count)
  1885. {
  1886. constexpr U32 size = (U32)sizeof(U16);
  1887. flush_vbo(GL_ELEMENT_ARRAY_BUFFER,
  1888. offset * size, (offset + count) * size - 1, (U8*)data);
  1889. }
  1890. void LLVertexBuffer::setIndexData(const U32* data, U32 offset, U32 count)
  1891. {
  1892. constexpr U32 size = (U32)sizeof(U32);
  1893. if (mIndicesType != GL_UNSIGNED_INT)
  1894. {
  1895. // *HACK: vertex buffers are initialized as 16 bits indices, but can be
  1896. // switched to 32 bits indices.
  1897. mIndicesType = GL_UNSIGNED_INT;
  1898. mIndicesStride = size;
  1899. mNumIndices /= 2;
  1900. }
  1901. flush_vbo(GL_ELEMENT_ARRAY_BUFFER,
  1902. offset * size, (offset + count) * size - 1, (U8*)data);
  1903. }
  1904. void LLVertexBuffer::setNormalData(const LLVector4a* data, U32 offset,
  1905. U32 count)
  1906. {
  1907. constexpr U32 size = (U32)sizeof(LLVector4a);
  1908. U32 start = mOffsets[TYPE_NORMAL];
  1909. flush_vbo(GL_ARRAY_BUFFER,
  1910. start + offset * size, start + (offset + count) * size - 1,
  1911. (U8*)data);
  1912. }
  1913. void LLVertexBuffer::setTangentData(const LLVector4a* data, U32 offset,
  1914. U32 count)
  1915. {
  1916. constexpr U32 size = (U32)sizeof(LLVector4a);
  1917. U32 start = mOffsets[TYPE_TANGENT];
  1918. flush_vbo(GL_ARRAY_BUFFER,
  1919. start + offset * size, start + (offset + count) * size - 1,
  1920. (U8*)data);
  1921. }
  1922. void LLVertexBuffer::setWeight4Data(const LLVector4a* data, U32 offset,
  1923. U32 count)
  1924. {
  1925. constexpr U32 size = (U32)sizeof(LLVector4a);
  1926. U32 start = mOffsets[TYPE_WEIGHT4];
  1927. flush_vbo(GL_ARRAY_BUFFER,
  1928. start + offset * size, start + (offset + count) * size - 1,
  1929. (U8*)data);
  1930. }
  1931. void LLVertexBuffer::setJointData(const U64* data, U32 offset, U32 count)
  1932. {
  1933. constexpr U32 size = (U32)sizeof(U64);
  1934. U32 start = mOffsets[TYPE_JOINT];
  1935. flush_vbo(GL_ARRAY_BUFFER,
  1936. start + offset * size, start + (offset + count) * size - 1,
  1937. (U8*)data);
  1938. }