
/**
 * @file llmeshoptimizer.cpp
 * @brief Wrapper around the meshoptimizer library
 *
 * $LicenseInfo:firstyear=2021&license=viewergpl$
 *
 * Copyright (c) 2021, Linden Research, Inc.
 *
 * Second Life Viewer Source Code
 * The source code in this file ("Source Code") is provided by Linden Lab
 * to you under the terms of the GNU General Public License, version 2.0
 * ("GPL"), unless you have obtained a separate licensing agreement
 * ("Other License"), formally executed by you and Linden Lab. Terms of
 * the GPL can be found in doc/GPL-license.txt in this distribution, or
 * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2
 *
 * There are special exceptions to the terms and conditions of the GPL as
 * it is applied to this Source Code. View the full text of the exception
 * in the file doc/FLOSS-exception.txt in this software distribution, or
 * online at
 * http://secondlifegrid.net/programs/open_source/licensing/flossexception
 *
 * By copying, modifying or distributing this software, you acknowledge
 * that you have read and understood your obligations described above,
 * and agree to abide by those obligations.
 *
 * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO
 * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY,
 * COMPLETENESS OR PERFORMANCE.
 * $/LicenseInfo$
 */

#include "linden_common.h"

#include "meshoptimizer.h"

#include "llmeshoptimizer.h"

#include "llvolume.h"

//static
void LLMeshOptimizer::generateShadowIndexBuffer16(U16* dest,
                                                  const U16* indices,
                                                  U64 idx_count,
                                                  const LLVector4a* vert_pos,
                                                  const LLVector4a* normals,
                                                  const LLVector2* tex_coords,
                                                  U64 vert_count)
{
    meshopt_Stream streams[3];
    S32 index = 0;
    if (vert_pos)
    {
        streams[index].data = (const F32*)vert_pos;
        // Despite being LLVector4a, only x, y and z are in use
        streams[index].size = 3 * sizeof(F32);
        streams[index++].stride = 4 * sizeof(F32);
    }
    if (normals)
    {
        streams[index].data = (const F32*)normals;
        // Despite being LLVector4a, only x, y and z are in use
        streams[index].size = 3 * sizeof(F32);
        streams[index++].stride = 4 * sizeof(F32);
    }
    if (tex_coords)
    {
        streams[index].data = (const F32*)tex_coords;
        streams[index].size = 2 * sizeof(F32);
        streams[index++].stride = 2 * sizeof(F32);
    }
    if (!index)
    {
        return; // Invalid. Abort.
    }
    meshopt_generateShadowIndexBufferMulti<U16>(dest, indices, idx_count,
                                                vert_count, streams, index);
}

//static
void LLMeshOptimizer::generateShadowIndexBuffer32(U32* dest,
                                                  const U32* indices,
                                                  U64 idx_count,
                                                  const LLVector4a* vert_pos,
                                                  const LLVector4a* normals,
                                                  const LLVector2* tex_coords,
                                                  U64 vert_count)
{
    meshopt_Stream streams[3];
    S32 index = 0;
    if (vert_pos)
    {
        streams[index].data = (const F32*)vert_pos;
        // Despite being LLVector4a, only x, y and z are in use
        streams[index].size = 3 * sizeof(F32);
        streams[index++].stride = 4 * sizeof(F32);
    }
    if (normals)
    {
        streams[index].data = (const F32*)normals;
        // Despite being LLVector4a, only x, y and z are in use
        streams[index].size = 3 * sizeof(F32);
        streams[index++].stride = 4 * sizeof(F32);
    }
    if (tex_coords)
    {
        streams[index].data = (const F32*)tex_coords;
        streams[index].size = 2 * sizeof(F32);
        streams[index++].stride = 2 * sizeof(F32);
    }
    if (!index)
    {
        return; // Invalid. Abort.
    }
    meshopt_generateShadowIndexBufferMulti<U32>(dest, indices, idx_count,
                                                vert_count, streams, index);
}
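
// Illustrative sketch, not part of the viewer code paths: how a caller might
// build a position-only shadow index buffer with the wrapper above. Variable
// names are hypothetical; 'shadow_indices' must have room for 'idx_count'
// entries. Since only the position stream is passed, all vertices sharing a
// position get welded to a single index, which is what depth-only passes want.
#if 0
static void example_shadow_indices(const LLVector4a* positions,
                                   const U32* indices, U64 idx_count,
                                   U64 vert_count, U32* shadow_indices)
{
    // NULL normals and texture coordinates: weld on position alone.
    LLMeshOptimizer::generateShadowIndexBuffer32(shadow_indices, indices,
                                                 idx_count, positions,
                                                 NULL,    // ignore normals
                                                 NULL,    // ignore tex coords
                                                 vert_count);
}
#endif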

//static
size_t LLMeshOptimizer::generateRemapMulti16(U32* remap,
                                             const U16* indices,
                                             U64 index_count,
                                             const LLVector4a* vert_pos,
                                             const LLVector4a* normals,
                                             const LLVector2* tex_coords,
                                             U64 vert_count)
{
    U32* indices_u32 = NULL;
    // Remap can function without indices, but providing indices helps with
    // removing unused vertices.
    if (indices)
    {
        indices_u32 = (U32*)allocate_volume_mem(index_count * sizeof(U32));
        if (!indices_u32)
        {
            LLMemory::allocationFailed();
            llwarns << "Out of memory trying to convert indices" << llendl;
            return 0;
        }
        S32 out_of_range_count = 0;
        for (U64 i = 0; i < index_count; ++i)
        {
            if (indices[i] < vert_count)
            {
                indices_u32[i] = indices[i];
            }
            else
            {
                ++out_of_range_count;
                indices_u32[i] = 0;
            }
        }
        if (out_of_range_count)
        {
            llwarns << out_of_range_count
                    << " indices were out of range (now zeroed)." << llendl;
        }
    }
    size_t unique = generateRemapMulti32(remap, indices_u32, index_count,
                                         vert_pos, normals, tex_coords,
                                         vert_count);
    free_volume_mem(indices_u32);
    return unique;
}

//static
size_t LLMeshOptimizer::generateRemapMulti32(U32* remap,
                                             const U32* indices,
                                             U64 index_count,
                                             const LLVector4a* vert_pos,
                                             const LLVector4a* normals,
                                             const LLVector2* tex_coords,
                                             U64 vert_count)
{
    meshopt_Stream streams[] =
    {
        { (const F32*)vert_pos, sizeof(F32) * 3, sizeof(F32) * 4 },
        { (const F32*)normals, sizeof(F32) * 3, sizeof(F32) * 4 },
        { (const F32*)tex_coords, sizeof(F32) * 2, sizeof(F32) * 2 },
    };
    constexpr size_t streams_elements = LL_ARRAY_SIZE(streams);
    // Remap can function without indices, but providing indices helps with
    // removing unused vertices.
    U64 indices_cnt = indices ? index_count : vert_count;
    // Note: this will trigger an assert should any indices[i] be >= vert_count.
    return meshopt_generateVertexRemapMulti(remap, indices, indices_cnt,
                                            vert_count, streams,
                                            streams_elements);
}
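
// Illustrative sketch (hypothetical caller code): producing a remap table for
// a face. The remap array needs one entry per source vertex; the returned
// value is the number of unique vertices, i.e. the size the compacted vertex
// buffers will have once the remap is applied with the remap*Buffer()
// wrappers below.
#if 0
static size_t example_generate_remap(const U32* indices, U64 index_count,
                                     const LLVector4a* positions,
                                     const LLVector4a* normals,
                                     const LLVector2* tex_coords,
                                     U64 vert_count)
{
    // One remap entry per source vertex.
    U32* remap = (U32*)allocate_volume_mem(vert_count * sizeof(U32));
    if (!remap)
    {
        return 0;
    }
    // With positions, normals and texture coordinates all provided, two
    // vertices are merged only when all three attributes match.
    size_t unique = LLMeshOptimizer::generateRemapMulti32(remap, indices,
                                                          index_count,
                                                          positions, normals,
                                                          tex_coords,
                                                          vert_count);
    // ... use 'remap' with the remap*Buffer() wrappers below, then:
    free_volume_mem(remap);
    return unique;
}
#endif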

//static
void LLMeshOptimizer::remapIndexBuffer16(U16* dest, const U16* indices,
                                         U64 index_count, const U32* remap)
{
    meshopt_remapIndexBuffer<U16>(dest, indices, index_count, remap);
}

//static
void LLMeshOptimizer::remapIndexBuffer32(U32* dest, const U32* indices,
                                         U64 index_count, const U32* remap)
{
    meshopt_remapIndexBuffer<U32>(dest, indices, index_count, remap);
}

//static
void LLMeshOptimizer::remapVertsBuffer(LLVector4a* dest,
                                       const LLVector4a* verts, U64 count,
                                       const U32* remap)
{
    meshopt_remapVertexBuffer((F32*)dest, (const F32*)verts, count,
                              sizeof(LLVector4a), remap);
}

//static
void LLMeshOptimizer::remapTexCoordsBuffer(LLVector2* dest,
                                           const LLVector2* tc, U64 tc_count,
                                           const U32* remap)
{
    meshopt_remapVertexBuffer((F32*)dest, (const F32*)tc, tc_count,
                              sizeof(LLVector2), remap);
}
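
// Illustrative sketch (hypothetical caller code): applying a remap table
// produced by generateRemapMulti32() to rebuild compacted buffers. The new
// index buffer keeps 'index_count' entries but references the compacted
// vertices; the new vertex and texture coordinate buffers only need room for
// the unique vertex count returned by the remap generation.
#if 0
static void example_apply_remap(const U32* indices, U64 index_count,
                                const LLVector4a* positions,
                                const LLVector2* tex_coords, U64 vert_count,
                                const U32* remap,
                                U32* new_indices,          // index_count entries
                                LLVector4a* new_positions, // 'unique' entries
                                LLVector2* new_tex_coords) // 'unique' entries
{
    LLMeshOptimizer::remapIndexBuffer32(new_indices, indices, index_count,
                                        remap);
    LLMeshOptimizer::remapVertsBuffer(new_positions, positions, vert_count,
                                      remap);
    LLMeshOptimizer::remapTexCoordsBuffer(new_tex_coords, tex_coords,
                                          vert_count, remap);
}
#endif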

//static
size_t LLMeshOptimizer::simplify16(U16* dest, const U16* indices, U64 idx_count,
                                   const LLVector4a* vert_pos, U64 vert_count,
                                   U64 vert_pos_stride, U64 target_idx_count,
                                   F32 target_error, bool sloppy,
                                   F32* result_error)
{
    if (sloppy)
    {
        return meshopt_simplifySloppy<U16>(dest, indices, idx_count,
                                           (const F32*)vert_pos, vert_count,
                                           vert_pos_stride, target_idx_count,
                                           target_error, result_error);
    }
    return meshopt_simplify<U16>(dest, indices, idx_count, (const F32*)vert_pos,
                                 vert_count, vert_pos_stride, target_idx_count,
                                 target_error,
#if MESHOPTIMIZER_VERSION >= 180
                                 meshopt_SimplifyLockBorder,
#endif
                                 result_error);
}

//static
size_t LLMeshOptimizer::simplify32(U32* dest, const U32* indices, U64 idx_count,
                                   const LLVector4a* vert_pos, U64 vert_count,
                                   U64 vert_pos_stride, U64 target_idx_count,
                                   F32 target_error, bool sloppy,
                                   F32* result_error)
{
    if (sloppy)
    {
        return meshopt_simplifySloppy<U32>(dest, indices, idx_count,
                                           (const F32*)vert_pos, vert_count,
                                           vert_pos_stride, target_idx_count,
                                           target_error, result_error);
    }
    return meshopt_simplify<U32>(dest, indices, idx_count, (const F32*)vert_pos,
                                 vert_count, vert_pos_stride, target_idx_count,
                                 target_error,
#if MESHOPTIMIZER_VERSION >= 180
                                 meshopt_SimplifyLockBorder,
#endif
                                 result_error);
}
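
// Illustrative sketch (hypothetical caller code): reducing a mesh to roughly
// half its triangles for a lower LOD. 'lod_indices' must have room for
// 'idx_count' entries (the simplifier writes at most that many); the returned
// value is the number of indices actually produced, and 'error' receives the
// resulting relative deviation when a non-NULL pointer is passed.
#if 0
static size_t example_simplify_half(const U32* indices, U64 idx_count,
                                    const LLVector4a* positions,
                                    U64 vert_count, U32* lod_indices,
                                    F32* error)
{
    U64 target = idx_count / 2;
    // Keep the target a whole number of triangles.
    target -= target % 3;
    constexpr F32 max_error = 0.05f; // relative to the mesh extents
    // 'false' selects the topology-preserving simplifier; 'true' would use
    // the sloppy variant, which collapses more aggressively.
    return LLMeshOptimizer::simplify32(lod_indices, indices, idx_count,
                                       positions, vert_count,
                                       sizeof(LLVector4a), target, max_error,
                                       false, error);
}
#endif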