  1. /**
  2. * @file llmodel.cpp
  3. * @brief Model handling implementation
  4. *
  5. * $LicenseInfo:firstyear=2001&license=viewergpl$
  6. *
  7. * Copyright (c) 2010, Linden Research, Inc.
  8. *
  9. * Second Life Viewer Source Code
  10. * The source code in this file ("Source Code") is provided by Linden Lab
  11. * to you under the terms of the GNU General Public License, version 2.0
  12. * ("GPL"), unless you have obtained a separate licensing agreement
  13. * ("Other License"), formally executed by you and Linden Lab. Terms of
  14. * the GPL can be found in doc/GPL-license.txt in this distribution, or
  15. * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2
  16. *
  17. * There are special exceptions to the terms and conditions of the GPL as
  18. * it is applied to this Source Code. View the full text of the exception
  19. * in the file doc/FLOSS-exception.txt in this software distribution, or
  20. * online at
  21. * http://secondlifegrid.net/programs/open_source/licensing/flossexception
  22. *
  23. * By copying, modifying or distributing this software, you acknowledge
  24. * that you have read and understood your obligations described above,
  25. * and agree to abide by those obligations.
  26. *
  27. * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO
  28. * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY,
  29. * COMPLETENESS OR PERFORMANCE.
  30. * $/LicenseInfo$
  31. */
  32. #include "linden_common.h"
  33. #include <memory>
  34. #include "zlib.h"
  35. #include "llmodel.h"
  36. #include "llconvexdecomposition.h"
  37. #include "lljoint.h"
  38. #include "llmath.h"
  39. #include "llsdserialize.h"
  40. #include "hbxxh.h"
  41. ///////////////////////////////////////////////////////////////////////////////
  42. // LLMeshSkinInfo class
  43. ///////////////////////////////////////////////////////////////////////////////
  44. LLMeshSkinInfo::LLMeshSkinInfo()
  45. : mHash(0),
  46. mPelvisOffset(0.f),
  47. mLockScaleIfJointPosition(false),
  48. mInvalidJointsScrubbed(false)
  49. {
  50. }
  51. LLMeshSkinInfo::LLMeshSkinInfo(const LLSD& skin)
  52. : mHash(0),
  53. mPelvisOffset(0.f),
  54. mLockScaleIfJointPosition(false),
  55. mInvalidJointsScrubbed(false)
  56. {
  57. fromLLSD(skin);
  58. }
  59. LLMeshSkinInfo::LLMeshSkinInfo(const LLSD& skin, const LLUUID& mesh_id)
  60. : mHash(0),
  61. mMeshID(mesh_id),
  62. mPelvisOffset(0.f),
  63. mLockScaleIfJointPosition(false),
  64. mInvalidJointsScrubbed(false)
  65. {
  66. fromLLSD(skin);
  67. }
  68. void LLMeshSkinInfo::clone(const LLMeshSkinInfo& from)
  69. {
  70. mMeshID = from.mMeshID;
  71. mBindShapeMatrix = from.mBindShapeMatrix;
  72. mJointNames = from.mJointNames;
  73. mJointKeys = from.mJointKeys;
  74. mInvBindMatrix = from.mInvBindMatrix;
  75. mAlternateBindMatrix = from.mAlternateBindMatrix;
  76. mHash = from.mHash;
  77. mPelvisOffset = from.mPelvisOffset;
  78. mLockScaleIfJointPosition = from.mLockScaleIfJointPosition;
  79. mInvalidJointsScrubbed = from.mInvalidJointsScrubbed;
  80. }
  81. void LLMeshSkinInfo::fromLLSD(const LLSD& skin)
  82. {
  83. if (skin.has("joint_names"))
  84. {
  85. for (U32 i = 0, count = skin["joint_names"].size(); i < count; ++i)
  86. {
  87. std::string name = skin["joint_names"][i];
  88. mJointNames.emplace_back(name);
  89. mJointKeys.push_back(LLJoint::getKey(name));
  90. }
  91. }
  92. if (skin.has("inverse_bind_matrix"))
  93. {
  94. for (U32 i = 0, count = skin["inverse_bind_matrix"].size();
  95. i < count; ++i)
  96. {
  97. LLMatrix4 mat;
  98. for (U32 j = 0; j < 4; ++j)
  99. {
  100. for (U32 k = 0; k < 4; ++k)
  101. {
  102. mat.mMatrix[j][k] =
  103. skin["inverse_bind_matrix"][i][j * 4 + k].asReal();
  104. }
  105. }
  106. mInvBindMatrix.push_back(mat);
  107. }
  108. }
  109. if (mJointNames.size() != mInvBindMatrix.size())
  110. {
  111. llwarns << "Joints vs bind matrix count mismatch. Dropping joint bindings for mesh "
  112. << mMeshID << llendl;
  113. mJointNames.clear();
  114. mJointKeys.clear();
  115. mInvBindMatrix.clear();
  116. }
  117. if (skin.has("bind_shape_matrix"))
  118. {
  119. for (U32 j = 0; j < 4; ++j)
  120. {
  121. for (U32 k = 0; k < 4; ++k)
  122. {
  123. mBindShapeMatrix.mMatrix[j][k] =
  124. skin["bind_shape_matrix"][j * 4 + k].asReal();
  125. }
  126. }
  127. }
  128. size_t mat_size = llmin(mInvBindMatrix.size(),
  129. LL_CHARACTER_MAX_ANIMATED_JOINTS);
  130. mInvBindShapeMatrix.resize(mat_size);
  131. if (mat_size)
  132. {
  133. LLMatrix4a bind_shape, inv_bind, mat;
  134. bind_shape.loadu(mBindShapeMatrix);
  135. for (size_t i = 0; i < mat_size; ++i)
  136. {
  137. inv_bind.loadu(mInvBindMatrix[i]);
  138. mat.matMul(bind_shape, inv_bind);
  139. mInvBindShapeMatrix[i].set(mat.getF32ptr());
  140. }
  141. }
  142. if (skin.has("alt_inverse_bind_matrix"))
  143. {
  144. for (U32 i = 0, count = skin["alt_inverse_bind_matrix"].size();
  145. i < count; ++i)
  146. {
  147. LLMatrix4 mat;
  148. for (U32 j = 0; j < 4; ++j)
  149. {
  150. for (U32 k = 0; k < 4; ++k)
  151. {
  152. mat.mMatrix[j][k] =
  153. skin["alt_inverse_bind_matrix"][i][j * 4 + k].asReal();
  154. }
  155. }
  156. mAlternateBindMatrix.push_back(mat);
  157. }
  158. }
  159. if (skin.has("pelvis_offset"))
  160. {
  161. mPelvisOffset = skin["pelvis_offset"].asReal();
  162. }
  163. if (skin.has("lock_scale_if_joint_position"))
  164. {
  165. mLockScaleIfJointPosition =
  166. skin["lock_scale_if_joint_position"].asBoolean();
  167. }
  168. else
  169. {
  170. mLockScaleIfJointPosition = false;
  171. }
  172. updateHash();
  173. }
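// Illustrative sketch (not built, hence the "#if 0"): the LLSD layout that
// fromLLSD() above expects, with hypothetical values. Matrices are flattened
// row-major into 16 reals, with one "inverse_bind_matrix" entry per joint
// name.
#if 0
static LLSD make_example_skin_llsd()
{
	LLSD skin;
	skin["joint_names"][0] = "mPelvis";
	skin["joint_names"][1] = "mTorso";
	for (U32 i = 0; i < 16; ++i)
	{
		// Identity matrices, flattened as j * 4 + k
		F32 v = (i % 5 == 0) ? 1.f : 0.f;
		skin["bind_shape_matrix"][i] = v;
		skin["inverse_bind_matrix"][0][i] = v;
		skin["inverse_bind_matrix"][1][i] = v;
	}
	skin["pelvis_offset"] = 0.f;
	skin["lock_scale_if_joint_position"] = false;
	return skin;
}
#endif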
  174. LLSD LLMeshSkinInfo::asLLSD(bool include_joints,
  175. bool lock_scale_if_joint_position) const
  176. {
  177. LLSD ret;
  178. U32 joint_names_count = mJointNames.size();
  179. for (U32 i = 0; i < joint_names_count; ++i)
  180. {
  181. ret["joint_names"][i] = mJointNames[i];
  182. for (U32 j = 0; j < 4; ++j)
  183. {
  184. for (U32 k = 0; k < 4; ++k)
  185. {
  186. ret["inverse_bind_matrix"][i][j * 4 + k] =
  187. mInvBindMatrix[i].mMatrix[j][k];
  188. }
  189. }
  190. }
  191. for (U32 i = 0; i < 4; ++i)
  192. {
  193. for (U32 j = 0; j < 4; ++j)
  194. {
  195. ret["bind_shape_matrix"][i * 4 + j] =
  196. mBindShapeMatrix.mMatrix[i][j];
  197. }
  198. }
  199. if (include_joints && mAlternateBindMatrix.size() > 0)
  200. {
  201. for (U32 i = 0; i < joint_names_count; ++i)
  202. {
  203. for (U32 j = 0; j < 4; ++j)
  204. {
  205. for (U32 k = 0; k < 4; ++k)
  206. {
  207. ret["alt_inverse_bind_matrix"][i][j * 4 + k] =
  208. mAlternateBindMatrix[i].mMatrix[j][k];
  209. }
  210. }
  211. }
  212. if (lock_scale_if_joint_position)
  213. {
  214. ret["lock_scale_if_joint_position"] = lock_scale_if_joint_position;
  215. }
  216. ret["pelvis_offset"] = mPelvisOffset;
  217. }
  218. return ret;
  219. }
  220. void LLMeshSkinInfo::updateHash(bool force)
  221. {
  222. // When the mesh UUID is known (which is always the case for LLMeshSkinInfo
  223. // instances created by the mesh repository), use its 64 bits digest; there
  224. // is no need to hash anything else, since a skin with the same UUID always
  225. // has the same joints, inverse bind matrices, etc. HB
  226. if (!force && mMeshID.notNull())
  227. {
  228. mHash = mMeshID.getDigest64();
  229. return;
  230. }
  231. // Let's use our super-fast vectorized 64 bits hashing. HB
  232. HBXXH64 hash;
  233. // Hash joint names (like in LL's performance viewer). HB
  234. for (U32 i = 0, count = mJointNames.size(); i < count; ++i)
  235. {
  236. hash.update(mJointNames[i]);
  237. }
  238. // Hash joint keys (LL's performance viewer uses joint numbers instead). HB
  239. hash.update((const void*)mJointKeys.data(),
  240. sizeof(U32) * mJointKeys.size());
  241. // Hash inverse bind matrix (like in LL's performance viewer).
  242. // Note: there should be no padding/alignment issue between elements in the
  243. // mInvBindMatrix LLMatrix4s vector, given that an LLMatrix4 is represented
  244. // by 16 32-bit values (64 bytes). So we can save a loop here and hash the
  245. // whole vector as one contiguous block of data. HB
  246. hash.update((const void*)mInvBindMatrix.data(),
  247. sizeof(LLMatrix4) * mInvBindMatrix.size());
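// Optional sanity check for the layout assumption above (kept as an
// illustrative sketch only, assuming LLMatrix4 stores exactly a 4x4 array of
// F32 with no padding):
#if 0
static_assert(sizeof(LLMatrix4) == 16 * sizeof(F32),
	      "LLMatrix4 is expected to be 16 contiguous F32 values");
#endif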
  248. mHash = hash.digest();
  249. }
  250. ///////////////////////////////////////////////////////////////////////////////
  251. // LLModel class
  252. ///////////////////////////////////////////////////////////////////////////////
  253. static std::string model_names[] =
  254. {
  255. "lowest_lod",
  256. "low_lod",
  257. "medium_lod",
  258. "high_lod",
  259. "physics_mesh"
  260. };
  261. static const int MODEL_NAMES_LENGTH = sizeof(model_names) /
  262. sizeof(std::string);
  263. LLModel::LLModel(const LLVolumeParams& params, F32 detail)
  264. : LLVolume(params, detail),
  265. mNormalizedScale(1.f, 1.f, 1.f),
  266. mPelvisOffset(0.f),
  267. mStatus(NO_ERRORS),
  268. mSubmodelID(0),
  269. mDecompID(-1),
  270. mLocalID(-1)
  271. {
  272. }
  273. LLModel::~LLModel()
  274. {
  275. if (mDecompID >= 0)
  276. {
  277. LLConvexDecomposition::getInstance()->deleteDecomposition(mDecompID);
  278. }
  279. mPhysics.mMesh.clear();
  280. }
  281. std::string LLModel::getName() const
  282. {
  283. return mRequestedLabel.empty() ? mLabel : mRequestedLabel;
  284. }
  285. //static
  286. std::string LLModel::getStatusString(U32 status)
  287. {
  288. static const std::string status_strings[(size_t)INVALID_STATUS + 1] =
  289. {
  290. "status_no_error",
  291. "status_vertex_number_overflow",
  292. "bad_element",
  293. "invalid status"
  294. };
  295. return status < INVALID_STATUS ? status_strings[status]
  296. : status_strings[INVALID_STATUS];
  297. }
  298. void LLModel::offsetMesh(const LLVector3& pivotPoint)
  299. {
  300. LLVector4a pivot(pivotPoint[VX], pivotPoint[VY], pivotPoint[VZ]);
  301. for (std::vector<LLVolumeFace>::iterator faceIt = mVolumeFaces.begin();
  302. faceIt != mVolumeFaces.end(); )
  303. {
  304. std::vector<LLVolumeFace>::iterator currentFaceIt = faceIt++;
  305. LLVolumeFace& face = *currentFaceIt;
  306. LLVector4a* pos = (LLVector4a*)face.mPositions;
  307. for (S32 i = 0, count = face.mNumVertices; i < count ; ++i)
  308. {
  309. pos[i].add(pivot);
  310. }
  311. }
  312. }
  313. void LLModel::remapVolumeFaces()
  314. {
  315. for (S32 i = 0; i < getNumVolumeFaces(); ++i)
  316. {
  317. mVolumeFaces[i].remap();
  318. }
  319. }
  320. void LLModel::optimizeVolumeFaces()
  321. {
  322. for (S32 i = 0; i < getNumVolumeFaces(); ++i)
  323. {
  324. mVolumeFaces[i].optimize();
  325. }
  326. }
  327. struct MaterialBinding
  328. {
  329. S32 index;
  330. std::string matName;
  331. };
  332. struct MaterialSort
  333. {
  334. bool operator()(const MaterialBinding& lhs, const MaterialBinding& rhs)
  335. {
  336. return LLStringUtil::compareInsensitive(lhs.matName, rhs.matName) < 0;
  337. }
  338. };
  339. void LLModel::sortVolumeFacesByMaterialName()
  340. {
  341. S32 count = mVolumeFaces.size();
  342. S32 mat_count = mMaterialList.size();
  343. if (!count || !mat_count)
  344. {
  345. return; // Nothing to do
  346. }
  347. std::vector<MaterialBinding> bindings;
  348. bindings.resize(count);
  349. for (S32 i = 0; i < count; ++i)
  350. {
  351. bindings[i].index = i;
  352. if (i < mat_count)
  353. {
  354. bindings[i].matName = mMaterialList[i];
  355. }
  356. }
  357. std::sort(bindings.begin(), bindings.end(), MaterialSort());
  358. // Re-map the faces to be in the same order the mats now are...
  359. std::vector<LLVolumeFace> new_faces;
  360. new_faces.resize(count);
  361. for (S32 i = 0; i < count; ++i)
  362. {
  363. new_faces[i] = mVolumeFaces[bindings[i].index];
  364. if (i < mat_count)
  365. {
  366. mMaterialList[i] = bindings[i].matName;
  367. }
  368. }
  369. mVolumeFaces = new_faces;
  370. }
  371. void LLModel::trimVolumeFacesToSize(U32 new_count,
  372. LLVolume::face_list_t* remainder)
  373. {
  374. llassert(new_count <= (U32)LL_SCULPT_MESH_MAX_FACES);
  375. if (new_count && (U32)getNumVolumeFaces() > new_count)
  376. {
  377. // Copy out remaining volume faces for alternative handling, if
  378. // provided
  379. if (remainder)
  380. {
  381. (*remainder).assign(mVolumeFaces.begin() + new_count,
  382. mVolumeFaces.end());
  383. }
  384. // Trim down to the final set of volume faces (now stuffed to the
  385. // gills !)
  386. mVolumeFaces.resize(new_count);
  387. }
  388. }
  389. #if LL_NORMALIZE_ALL_MODELS
  390. // Shrink group of models to fit on a 1x1x1 cube centered at the origin.
  391. void LLModel::normalizeModels(const std::vector<LLPointer<LLModel> >& model_list)
  392. {
  393. S32 models_count = model_list.size();
  394. S32 n = 0;
  395. while (n < models_count && model_list[n]->mVolumeFaces.empty())
  396. {
  397. ++n;
  398. }
  399. // no model with faces
  400. if (n == models_count) return;
  401. LLVector4a min = model_list[n]->mVolumeFaces[0].mExtents[0];
  402. LLVector4a max = model_list[n]->mVolumeFaces[0].mExtents[1];
  403. // Treat models as a group: each model out of 1x1 cube needs scaling and
  404. // will affect whole group scale.
  405. while (n < models_count)
  406. {
  407. LLModel* model = model_list[n++].get();
  408. if (model && !model->mVolumeFaces.empty())
  409. {
  410. // For all of the volume faces in the model, loop over them and see
  411. // what the extents of the volume are along each axis.
  412. for (S32 i = 1, count = model->mVolumeFaces.size(); i < count; ++i)
  413. {
  414. LLVolumeFace& face = model->mVolumeFaces[i];
  415. update_min_max(min, max, face.mExtents[0]);
  416. update_min_max(min, max, face.mExtents[1]);
  417. if (face.mTexCoords)
  418. {
  419. LLVector2& min_tc = face.mTexCoordExtents[0];
  420. LLVector2& max_tc = face.mTexCoordExtents[1];
  421. min_tc = face.mTexCoords[0];
  422. max_tc = face.mTexCoords[0];
  423. for (S32 j = 1; j < face.mNumVertices; ++j)
  424. {
  425. update_min_max(min_tc, max_tc, face.mTexCoords[j]);
  426. }
  427. }
  428. else
  429. {
  430. face.mTexCoordExtents[0].set(0, 0);
  431. face.mTexCoordExtents[1].set(1, 1);
  432. }
  433. }
  434. }
  435. }
  436. // Now that we have the extents of the model, we can compute the offset
  437. // needed to center the model at the origin.
  438. // Compute center of the model and make it negative to get translation
  439. // needed to center at origin.
  440. LLVector4a trans;
  441. trans.setAdd(min, max);
  442. trans.mul(-0.5f);
  443. // Compute the total size along all axes of the model.
  444. LLVector4a size;
  445. size.setSub(max, min);
  446. // Prevent division by zero.
  447. F32 x = size[0];
  448. F32 y = size[1];
  449. F32 z = size[2];
  450. F32 w = size[3];
  451. if (fabs(x) < F_APPROXIMATELY_ZERO)
  452. {
  453. x = 1.f;
  454. }
  455. if (fabs(y) < F_APPROXIMATELY_ZERO)
  456. {
  457. y = 1.f;
  458. }
  459. if (fabs(z) < F_APPROXIMATELY_ZERO)
  460. {
  461. z = 1.f;
  462. }
  463. size.set(x, y, z, w);
  464. // Compute scale as reciprocal of size
  465. LLVector4a scale;
  466. scale.splat(1.f);
  467. scale.div(size);
  468. LLVector4a inv_scale(1.f);
  469. inv_scale.div(scale);
  470. n = 0;
  471. while (n < models_count)
  472. {
  473. LLModel* model = model_list[n++].get();
  474. if (!model || model->mVolumeFaces.empty()) continue;
  475. for (S32 i = 0, count = model->mVolumeFaces.size(); i < count; ++i)
  476. {
  477. LLVolumeFace& face = model->mVolumeFaces[i];
  478. // We shrink the extents so that they fall within the unit cube.
  479. face.mExtents[0].add(trans);
  480. face.mExtents[0].mul(scale);
  481. face.mExtents[1].add(trans);
  482. face.mExtents[1].mul(scale);
  483. // For all the positions, we scale the positions to fit within the
  484. // unit cube.
  485. LLVector4a* pos = (LLVector4a*)face.mPositions;
  486. LLVector4a* norm = (LLVector4a*)face.mNormals;
  487. LLVector4a* t = (LLVector4a*)face.mTangents;
  488. for (S32 j = 0; j < face.mNumVertices; ++j)
  489. {
  490. pos[j].add(trans);
  491. pos[j].mul(scale);
  492. if (norm && !norm[j].equals3(LLVector4a::getZero()))
  493. {
  494. norm[j].mul(inv_scale);
  495. norm[j].normalize3();
  496. }
  497. if (t)
  498. {
  499. F32 w = t[j].getF32ptr()[3];
  500. t[j].mul(inv_scale);
  501. t[j].normalize3();
  502. t[j].getF32ptr()[3] = w;
  503. }
  504. }
  505. }
  506. // mNormalizedScale is the scale by which we would need to multiply the
  507. // model to get the original size of the model instead of the
  508. // normalized size.
  509. LLVector4a normalized_scale;
  510. normalized_scale.splat(1.f);
  511. normalized_scale.div(scale);
  512. model->mNormalizedScale.set(normalized_scale.getF32ptr());
  513. model->mNormalizedTranslation.set(trans.getF32ptr());
  514. model->mNormalizedTranslation *= -1.f;
  515. }
  516. }
  517. #endif
  518. // Shrink the model to fit on a 1x1x1 cube centered at the origin. The
  519. // positions and extents, multiplied by mNormalizedScale and offset by
  520. // mNormalizedTranslation, give back the "original" extents and position. Also, the
  521. // positions will fit within the unit cube.
  522. void LLModel::normalizeVolumeFaces()
  523. {
  524. // Ensure we do not have too many faces
  525. if ((S32)mVolumeFaces.size() > LL_SCULPT_MESH_MAX_FACES)
  526. {
  527. mVolumeFaces.resize(LL_SCULPT_MESH_MAX_FACES);
  528. }
  529. if (!mVolumeFaces.empty())
  530. {
  531. LLVector4a min, max;
  532. // For all of the volume faces in the model, loop over them and see
  533. // what the extents of the volume are along each axis.
  534. min = mVolumeFaces[0].mExtents[0];
  535. max = mVolumeFaces[0].mExtents[1];
  536. for (S32 i = 1, count = mVolumeFaces.size(); i < count; ++i)
  537. {
  538. LLVolumeFace& face = mVolumeFaces[i];
  539. update_min_max(min, max, face.mExtents[0]);
  540. update_min_max(min, max, face.mExtents[1]);
  541. if (face.mTexCoords)
  542. {
  543. LLVector2& min_tc = face.mTexCoordExtents[0];
  544. LLVector2& max_tc = face.mTexCoordExtents[1];
  545. min_tc = face.mTexCoords[0];
  546. max_tc = face.mTexCoords[0];
  547. for (S32 j = 1; j < face.mNumVertices; ++j)
  548. {
  549. update_min_max(min_tc, max_tc, face.mTexCoords[j]);
  550. }
  551. }
  552. else
  553. {
  554. face.mTexCoordExtents[0].set(0, 0);
  555. face.mTexCoordExtents[1].set(1, 1);
  556. }
  557. }
  558. // Now that we have the extents of the model, we can compute the offset
  559. // needed to center the model at the origin.
  560. // Compute center of the model and make it negative to get translation
  561. // needed to center at origin.
  562. LLVector4a trans;
  563. trans.setAdd(min, max);
  564. trans.mul(-0.5f);
  565. // Compute the total size along all axes of the model.
  566. LLVector4a size;
  567. size.setSub(max, min);
  568. // Prevent division by zero.
  569. F32 x = size[0];
  570. F32 y = size[1];
  571. F32 z = size[2];
  572. F32 w = size[3];
  573. if (fabs(x) < F_APPROXIMATELY_ZERO)
  574. {
  575. x = 1.f;
  576. }
  577. if (fabs(y) < F_APPROXIMATELY_ZERO)
  578. {
  579. y = 1.f;
  580. }
  581. if (fabs(z) < F_APPROXIMATELY_ZERO)
  582. {
  583. z = 1.f;
  584. }
  585. size.set(x, y, z, w);
  586. // Compute scale as reciprocal of size
  587. LLVector4a scale;
  588. scale.splat(1.f);
  589. scale.div(size);
  590. LLVector4a inv_scale(1.f);
  591. inv_scale.div(scale);
  592. for (S32 i = 0, count = mVolumeFaces.size(); i < count; ++i)
  593. {
  594. LLVolumeFace& face = mVolumeFaces[i];
  595. // We shrink the extents so that they fall within the unit cube.
  596. face.mExtents[0].add(trans);
  597. face.mExtents[0].mul(scale);
  598. face.mExtents[1].add(trans);
  599. face.mExtents[1].mul(scale);
  600. // For all the positions, we scale the positions to fit within the
  601. // unit cube.
  602. LLVector4a* pos = (LLVector4a*)face.mPositions;
  603. LLVector4a* norm = (LLVector4a*)face.mNormals;
  604. LLVector4a* t = (LLVector4a*)face.mTangents;
  605. for (S32 j = 0; j < face.mNumVertices; ++j)
  606. {
  607. pos[j].add(trans);
  608. pos[j].mul(scale);
  609. if (norm && !norm[j].equals3(LLVector4a::getZero()))
  610. {
  611. norm[j].mul(inv_scale);
  612. norm[j].normalize3();
  613. }
  614. if (t)
  615. {
  616. F32 w = t[j].getF32ptr()[3];
  617. t[j].mul(inv_scale);
  618. t[j].normalize3();
  619. t[j].getF32ptr()[3] = w;
  620. }
  621. }
  622. }
  623. // mNormalizedScale is the scale by which we would need to multiply the
  624. // model to get the original size of the model instead of the
  625. // normalized size.
  626. LLVector4a normalized_scale;
  627. normalized_scale.splat(1.f);
  628. normalized_scale.div(scale);
  629. mNormalizedScale.set(normalized_scale.getF32ptr());
  630. mNormalizedTranslation.set(trans.getF32ptr());
  631. mNormalizedTranslation *= -1.f;
  632. // Remember normalized scale so original dimensions can be recovered
  633. // for mesh processing (i.e. tangent generation)
  634. for (S32 i = 0, count = mVolumeFaces.size(); i < count; ++i)
  635. {
  636. mVolumeFaces[i].mNormalizedScale = mNormalizedScale;
  637. }
  638. }
  639. }
  640. void LLModel::getNormalizedScaleTranslation(LLVector3& scale_out,
  641. LLVector3& translation_out)
  642. {
  643. scale_out = mNormalizedScale;
  644. translation_out = mNormalizedTranslation;
  645. }
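// Illustrative sketch (not built): recovering an original-space position from
// a normalized one, as described in the comment above normalizeVolumeFaces().
// 'modelp' and 'norm_pos' are hypothetical inputs.
#if 0
static LLVector3 denormalize_position(LLModel* modelp,
				      const LLVector3& norm_pos)
{
	LLVector3 scale, trans;
	modelp->getNormalizedScaleTranslation(scale, trans);
	LLVector3 orig;
	orig.mV[VX] = norm_pos.mV[VX] * scale.mV[VX] + trans.mV[VX];
	orig.mV[VY] = norm_pos.mV[VY] * scale.mV[VY] + trans.mV[VY];
	orig.mV[VZ] = norm_pos.mV[VZ] * scale.mV[VZ] + trans.mV[VZ];
	return orig;
}
#endif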
  646. void LLModel::setNumVolumeFaces(S32 count)
  647. {
  648. mVolumeFaces.resize(count);
  649. }
  650. void LLModel::setVolumeFaceData(S32 f, LLStrider<LLVector3> pos,
  651. LLStrider<LLVector3> norm,
  652. LLStrider<LLVector2> tc,
  653. LLStrider<U16> ind, U32 num_verts,
  654. U32 num_indices)
  655. {
  656. LLVolumeFace& face = mVolumeFaces[f];
  657. face.resizeVertices(num_verts);
  658. face.resizeIndices(num_indices);
  659. LLVector4a::memcpyNonAliased16((F32*)face.mPositions, (F32*)pos.get(),
  660. num_verts * 4 * sizeof(F32));
  661. if (norm.get())
  662. {
  663. LLVector4a::memcpyNonAliased16((F32*)face.mNormals, (F32*)norm.get(),
  664. num_verts * 4 * sizeof(F32));
  665. }
  666. else
  667. {
  668. // NOTE: normals are part of the same buffer as mPositions, do not free
  669. // them separately.
  670. face.mNormals = NULL;
  671. }
  672. if (tc.get())
  673. {
  674. U32 tex_size = (num_verts * 2 * sizeof(F32) + 0xF) & ~0xF;
  675. LLVector4a::memcpyNonAliased16((F32*)face.mTexCoords, (F32*)tc.get(),
  676. tex_size);
  677. }
  678. else
  679. {
  680. // NOTE: texture coordinates are part of the same buffer as mPositions,
  681. // do not free them separately.
  682. face.mTexCoords = NULL;
  683. }
  684. U32 size = (num_indices * 2 + 0xF) & ~0xF;
  685. LLVector4a::memcpyNonAliased16((F32*)face.mIndices, (F32*)ind.get(), size);
  686. }
  687. void LLModel::addFace(const LLVolumeFace& face)
  688. {
  689. if (face.mNumVertices == 0)
  690. {
  691. llerrs << "Cannot add empty face." << llendl;
  692. }
  693. mVolumeFaces.emplace_back(face);
  694. if (mVolumeFaces.size() > MAX_MODEL_FACES)
  695. {
  696. llerrs << "Model prims cannot have more than " << MAX_MODEL_FACES
  697. << " faces !" << llendl;
  698. }
  699. }
  700. void LLModel::generateNormals(F32 angle_cutoff)
  701. {
  702. // Generate normals for all faces by:
  703. // 1 - Create faceted copy of face with no texture coordinates
  704. // 2 - Weld vertices in faceted copy that are shared between triangles with
  705. // less than "angle_cutoff" difference between normals
  706. // 3 - Generate smoothed set of normals based on welding results
  707. // 4 - Create faceted copy of face with texture coordinates
  708. // 5 - Copy smoothed normals to faceted copy, using closest normal to
  709. // triangle normal where more than one normal exists for a given
  710. // position
  711. // 6 - Remove redundant vertices from new faceted (now smooth) copy
  712. angle_cutoff = cosf(angle_cutoff);
  713. for (U32 j = 0; j < mVolumeFaces.size(); ++j)
  714. {
  715. LLVolumeFace& vol_face = mVolumeFaces[j];
  716. if (vol_face.mNumIndices > 65535)
  717. {
  718. llwarns << "Too many vertices for normal generation to work."
  719. << llendl;
  720. continue;
  721. }
  722. // Create faceted copy of current face with no texture coordinates
  723. // (step 1)
  724. LLVolumeFace faceted;
  725. LLVector4a* src_pos = (LLVector4a*)vol_face.mPositions;
  726. //LLVector4a* src_norm = (LLVector4a*)vol_face.mNormals;
  727. faceted.resizeVertices(vol_face.mNumIndices);
  728. faceted.resizeIndices(vol_face.mNumIndices);
  729. // bake out triangles into temporary face, clearing texture coordinates
  730. for (S32 i = 0; i < vol_face.mNumIndices; ++i)
  731. {
  732. U32 idx = vol_face.mIndices[i];
  733. faceted.mPositions[i] = src_pos[idx];
  734. faceted.mTexCoords[i].clear();
  735. faceted.mIndices[i] = i;
  736. }
  737. LLVector4a lhs, rhs;
  738. // Generate normals for temporary face
  739. for (S32 i = 0; i < faceted.mNumIndices; i += 3)
  740. {
  741. // For each triangle
  742. U16 i0 = faceted.mIndices[i];
  743. U16 i1 = faceted.mIndices[i + 1];
  744. U16 i2 = faceted.mIndices[i + 2];
  745. LLVector4a& p0 = faceted.mPositions[i0];
  746. LLVector4a& p1 = faceted.mPositions[i1];
  747. LLVector4a& p2 = faceted.mPositions[i2];
  748. LLVector4a& n0 = faceted.mNormals[i0];
  749. LLVector4a& n1 = faceted.mNormals[i1];
  750. LLVector4a& n2 = faceted.mNormals[i2];
  751. lhs.setSub(p1, p0);
  752. rhs.setSub(p2, p0);
  753. n0.setCross3(lhs, rhs);
  754. n0.normalize3();
  755. n1 = n0;
  756. n2 = n0;
  757. }
  758. // Weld vertices in temporary face, respecting angle_cutoff (step 2)
  759. faceted.optimize(angle_cutoff);
  760. // Generate normals for welded face based on new topology (step 3)
  761. for (S32 i = 0; i < faceted.mNumVertices; ++i)
  762. {
  763. faceted.mNormals[i].clear();
  764. }
  765. LLVector4a n;
  766. for (S32 i = 0; i < faceted.mNumIndices; i += 3)
  767. {
  768. // For each triangle
  769. U16 i0 = faceted.mIndices[i];
  770. U16 i1 = faceted.mIndices[i + 1];
  771. U16 i2 = faceted.mIndices[i + 2];
  772. LLVector4a& p0 = faceted.mPositions[i0];
  773. LLVector4a& p1 = faceted.mPositions[i1];
  774. LLVector4a& p2 = faceted.mPositions[i2];
  775. LLVector4a& n0 = faceted.mNormals[i0];
  776. LLVector4a& n1 = faceted.mNormals[i1];
  777. LLVector4a& n2 = faceted.mNormals[i2];
  778. LLVector4a lhs, rhs;
  779. lhs.setSub(p1, p0);
  780. rhs.setSub(p2, p0);
  781. n.setCross3(lhs, rhs);
  782. n0.add(n);
  783. n1.add(n);
  784. n2.add(n);
  785. }
  786. // Normalize normals and build point map
  787. LLVolumeFace::VertexMapData::PointMap point_map;
  788. for (S32 i = 0; i < faceted.mNumVertices; ++i)
  789. {
  790. faceted.mNormals[i].normalize3();
  791. LLVolumeFace::VertexMapData v;
  792. v.setPosition(faceted.mPositions[i]);
  793. v.setNormal(faceted.mNormals[i]);
  794. point_map[LLVector3(v.getPosition().getF32ptr())].push_back(v);
  795. }
  796. // Create faceted copy of current face with texture coordinates
  797. // (step 4)
  798. LLVolumeFace new_face;
  799. // Bake out triangles into new face
  800. new_face.resizeIndices(vol_face.mNumIndices);
  801. new_face.resizeVertices(vol_face.mNumIndices);
  802. for (S32 i = 0; i < vol_face.mNumIndices; ++i)
  803. {
  804. U32 idx = vol_face.mIndices[i];
  805. LLVolumeFace::VertexData v;
  806. new_face.mPositions[i] = vol_face.mPositions[idx];
  807. new_face.mNormals[i].clear();
  808. new_face.mIndices[i] = i;
  809. }
  810. if (vol_face.mTexCoords)
  811. {
  812. for (S32 i = 0; i < vol_face.mNumIndices; ++i)
  813. {
  814. U32 idx = vol_face.mIndices[i];
  815. new_face.mTexCoords[i] = vol_face.mTexCoords[idx];
  816. }
  817. }
  818. else
  819. {
  820. // NOTE: texture coordinates are part of the same buffer as
  821. // mPositions, do not free them separately.
  822. new_face.mTexCoords = NULL;
  823. }
  824. // Generate normals for new face
  825. for (S32 i = 0; i < new_face.mNumIndices; i += 3)
  826. {
  827. // For each triangle
  828. U16 i0 = new_face.mIndices[i];
  829. U16 i1 = new_face.mIndices[i + 1];
  830. U16 i2 = new_face.mIndices[i + 2];
  831. LLVector4a& p0 = new_face.mPositions[i0];
  832. LLVector4a& p1 = new_face.mPositions[i1];
  833. LLVector4a& p2 = new_face.mPositions[i2];
  834. LLVector4a& n0 = new_face.mNormals[i0];
  835. LLVector4a& n1 = new_face.mNormals[i1];
  836. LLVector4a& n2 = new_face.mNormals[i2];
  837. LLVector4a lhs, rhs;
  838. lhs.setSub(p1, p0);
  839. rhs.setSub(p2, p0);
  840. n0.setCross3(lhs, rhs);
  841. n0.normalize3();
  842. n1 = n0;
  843. n2 = n0;
  844. }
  845. // Swap out normals in new_face with best match from point map (step 5)
  846. for (S32 i = 0; i < new_face.mNumVertices; ++i)
  847. {
  848. LLVolumeFace::VertexMapData::PointMap::iterator iter =
  849. point_map.find(LLVector3(new_face.mPositions[i].getF32ptr()));
  850. if (iter != point_map.end())
  851. {
  852. LLVector4a ref_norm = new_face.mNormals[i];
  853. F32 best = -2.f;
  854. for (S32 k = 0, count = iter->second.size(); k < count; ++k)
  855. {
  856. LLVector4a& n = iter->second[k].getNormal();
  857. F32 cur = n.dot3(ref_norm).getF32();
  858. if (cur > best)
  859. {
  860. best = cur;
  861. new_face.mNormals[i] = n;
  862. }
  863. }
  864. }
  865. }
  866. // Remove redundant vertices from new face (step 6)
  867. new_face.optimize();
  868. mVolumeFaces[j] = new_face;
  869. }
  870. }
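// Illustrative usage sketch (not built): regenerate smoothed normals, welding
// vertices shared by triangles whose normals differ by less than 45 degrees.
// The cutoff is passed in radians ('modelp' is a hypothetical model pointer):
#if 0
modelp->generateNormals(45.f * DEG_TO_RAD);
#endif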
  871. // Used to be a validate_model(const LLModel* mdl) global function. HB
  872. bool LLModel::validate(bool check_nans) const
  873. {
  874. S32 count = getNumVolumeFaces();
  875. if (count <= 0)
  876. {
  877. llwarns << "Model has no faces !" << llendl;
  878. return false;
  879. }
  880. for (S32 i = 0; i < count; ++i)
  881. {
  882. const LLVolumeFace& vol_face = getVolumeFace(i);
  883. if (vol_face.mNumVertices == 0)
  884. {
  885. llwarns << "Face has no vertices." << llendl;
  886. return false;
  887. }
  888. if (vol_face.mNumIndices == 0)
  889. {
  890. llwarns << "Face has no indices." << llendl;
  891. return false;
  892. }
  893. if (!vol_face.validate(check_nans))
  894. {
  895. return false;
  896. }
  897. }
  898. return true;
  899. }
  900. //static
  901. LLSD LLModel::writeModel(std::ostream& ostr, LLModel* physics, LLModel* high,
  902. LLModel* medium, LLModel* low, LLModel* impostor,
  903. const LLModel::Decomposition& decomp,
  904. bool upload_skin, bool upload_joints,
  905. bool lock_scale_if_joint_position,
  906. bool nowrite, bool as_slm, S32 submodel_id)
  907. {
  908. LLSD mdl;
  909. LLModel* model[] =
  910. {
  911. impostor,
  912. low,
  913. medium,
  914. high,
  915. physics
  916. };
  917. bool skinning = upload_skin && high && !high->mSkinWeights.empty();
  918. if (skinning)
  919. {
  920. // Write skinning block
  921. mdl["skin"] = high->mSkinInfo.asLLSD(upload_joints,
  922. lock_scale_if_joint_position);
  923. }
  924. if (!decomp.mBaseHull.empty() || !decomp.mHull.empty())
  925. {
  926. mdl["physics_convex"] = decomp.asLLSD();
  927. if (!decomp.mHull.empty() && !as_slm)
  928. {
  929. // Convex decomposition exists, physics mesh will not be used
  930. // (unless this is an slm file)
  931. model[LLModel::LOD_PHYSICS] = NULL;
  932. }
  933. }
  934. else if (submodel_id)
  935. {
  936. const LLModel::Decomposition fake_decomp;
  937. mdl["secondary"] = true;
  938. mdl["submodel_id"] = submodel_id;
  939. mdl["physics_convex"] = fake_decomp.asLLSD();
  940. model[LLModel::LOD_PHYSICS] = NULL;
  941. }
  942. if (as_slm)
  943. {
  944. // Save material list names
  945. for (U32 i = 0; i < high->mMaterialList.size(); ++i)
  946. {
  947. mdl["material_list"][i] = high->mMaterialList[i];
  948. }
  949. }
  950. for (S32 idx = 0; idx < MODEL_NAMES_LENGTH; ++idx)
  951. {
  952. LLModel* modelp = model[idx];
  953. if (!modelp || !modelp->getNumVolumeFaces() ||
  954. !modelp->getVolumeFace(0).mPositions)
  955. {
  956. llwarns << "Invalid model at index " << idx << ". Skipping."
  957. << llendl;
  958. continue;
  959. }
  960. LLVector3 min_pos(modelp->getVolumeFace(0).mPositions[0].getF32ptr());
  961. LLVector3 max_pos = min_pos;
  962. // Find position domain
  963. for (S32 i = 0; i < modelp->getNumVolumeFaces(); ++i)
  964. {
  965. const LLVolumeFace& face = modelp->getVolumeFace(i);
  966. for (S32 j = 0; j < face.mNumVertices; ++j)
  967. {
  968. update_min_max(min_pos, max_pos,
  969. face.mPositions[j].getF32ptr());
  970. }
  971. }
  972. LLVector3 pos_range = max_pos - min_pos;
  973. for (S32 i = 0; i < modelp->getNumVolumeFaces(); ++i)
  974. {
  975. const LLVolumeFace& face = modelp->getVolumeFace(i);
  976. if (face.mNumVertices < 3)
  977. {
  978. // Do not export an empty face
  979. mdl[model_names[idx]][i]["NoGeometry"] = true;
  980. continue;
  981. }
  982. S32 vertices = face.mNumVertices;
  983. LLSD::Binary verts(vertices * 6);
  984. LLSD::Binary tc(vertices * 4);
  985. LLSD::Binary normals(vertices * 6);
  986. LLSD::Binary indices(face.mNumIndices * 2);
  987. #if LL_USE_TANGENTS
  988. LLSD::Binary tangents(face.mNumVertices * 8);
  989. #endif
  990. LLVector2* ftc = (LLVector2*)face.mTexCoords;
  991. LLVector2 min_tc;
  992. LLVector2 max_tc;
  993. if (ftc)
  994. {
  995. min_tc = ftc[0];
  996. max_tc = min_tc;
  997. // Get texture coordinate domain
  998. for (S32 j = 0; j < vertices; ++j)
  999. {
  1000. update_min_max(min_tc, max_tc, ftc[j]);
  1001. }
  1002. }
  1003. U32 vert_idx = 0;
  1004. U32 norm_idx = 0;
  1005. U32 tc_idx = 0;
  1006. #if LL_USE_TANGENTS
  1007. U32 tan_idx = 0;
  1008. #endif
  1009. LLVector2 tc_range = max_tc - min_tc;
  1010. for (S32 j = 0; j < vertices; ++j)
  1011. {
  1012. // For each vertex...
  1013. F32* pos = face.mPositions[j].getF32ptr();
  1014. // Position
  1015. for (U32 k = 0; k < 3; ++k)
  1016. {
  1017. // For each component...
  1018. // Convert to 16-bit normalized across domain
  1019. U16 val = (U16)((pos[k] - min_pos.mV[k]) /
  1020. pos_range.mV[k] * 65535);
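// Note: a reader dequantizes with the inverse mapping:
// pos[k] = min_pos.mV[k] + val / 65535.f * pos_range.mV[k]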
  1021. // Write to binary buffer
  1022. U8* buff = (U8*)&val;
  1023. verts[vert_idx++] = buff[0];
  1024. verts[vert_idx++] = buff[1];
  1025. }
  1026. if (face.mNormals)
  1027. {
  1028. F32* norm = face.mNormals[j].getF32ptr();
  1029. for (U32 k = 0; k < 3; ++k)
  1030. {
  1031. // For each component convert to 16 bits normalized
  1032. constexpr F32 norm_factor = 0.5f * 65535.f;
  1033. U16 val = (U16)((norm[k] + 1.f) * norm_factor);
  1034. U8* buff = (U8*)&val;
  1035. // Write to binary buffer
  1036. normals[norm_idx++] = buff[0];
  1037. normals[norm_idx++] = buff[1];
  1038. }
  1039. }
  1040. #if LL_USE_TANGENTS
  1041. if (face.mTangents)
  1042. {
  1043. F32* tangent = face.mTangents[j].getF32ptr();
  1044. for (U32 k = 0; k < 4; ++k)
  1045. {
  1046. // For each component...
  1047. // Convert to 16-bit normalized
  1048. U16 val = (U16)((tangent[k] + 1.f) * 0.5f * 65535.f);
  1049. // Write to binary buffer
  1050. U8* buff = (U8*)&val;
  1051. tangents[tan_idx++] = buff[0];
  1052. tangents[tan_idx++] = buff[1];
  1053. }
  1054. }
  1055. #endif
  1056. if (face.mTexCoords)
  1057. {
  1058. F32* src_tc = (F32*)face.mTexCoords[j].mV;
  1059. for (U32 k = 0; k < 2; ++k)
  1060. {
  1061. // For each component...
  1062. // Convert to 16-bit normalized
  1063. U16 val = (U16)((src_tc[k] - min_tc.mV[k]) /
  1064. tc_range.mV[k] * 65535.f);
  1065. // Write to binary buffer
  1066. U8* buff = (U8*)&val;
  1067. tc[tc_idx++] = buff[0];
  1068. tc[tc_idx++] = buff[1];
  1069. }
  1070. }
  1071. }
  1072. for (S32 j = 0, idx_idx = 0; j < face.mNumIndices; ++j)
  1073. {
  1074. U8* buff = (U8*)&(face.mIndices[j]);
  1075. indices[idx_idx++] = buff[0];
  1076. indices[idx_idx++] = buff[1];
  1077. }
  1078. // Write out face data
  1079. mdl[model_names[idx]][i]["PositionDomain"]["Min"] =
  1080. min_pos.getValue();
  1081. mdl[model_names[idx]][i]["PositionDomain"]["Max"] =
  1082. max_pos.getValue();
  1083. mdl[model_names[idx]][i]["NormalizedScale"] =
  1084. face.mNormalizedScale.getValue();
  1085. mdl[model_names[idx]][i]["Position"] = verts;
  1086. if (face.mNormals)
  1087. {
  1088. mdl[model_names[idx]][i]["Normal"] = normals;
  1089. }
  1090. #if LL_USE_TANGENTS
  1091. if (face.mTangents)
  1092. {
  1093. mdl[model_names[idx]][i]["Tangent"] = tangents;
  1094. }
  1095. #endif
  1096. if (face.mTexCoords)
  1097. {
  1098. mdl[model_names[idx]][i]["TexCoord0Domain"]["Min"] =
  1099. min_tc.getValue();
  1100. mdl[model_names[idx]][i]["TexCoord0Domain"]["Max"] =
  1101. max_tc.getValue();
  1102. mdl[model_names[idx]][i]["TexCoord0"] = tc;
  1103. }
  1104. mdl[model_names[idx]][i]["TriangleList"] = indices;
  1105. if (skinning)
  1106. {
  1107. if (!modelp->mSkinWeights.empty())
  1108. {
  1109. // Write out skin weights
  1110. // Each influence list holds up to four 24-bit entries: the
  1111. // first 8 bits are the bone index, the last 16 bits the bone
  1112. // influence weight; a bone index of 0xFF signifies no more
  1113. // influences for this vertex.
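// Illustrative decoder sketch (not built) for the "Weights" block written
// below; the 'data' buffer and 'len' length are hypothetical inputs holding
// the raw binary block.
#if 0
static void decode_weights_example(const U8* data, U32 len)
{
	U32 off = 0;
	while (off < len)	// One influence list per vertex
	{
		for (U32 n = 0; n < 4 && off < len; ++n)
		{
			U8 joint_idx = data[off++];
			if (joint_idx == 0xFF)	// No more influences for this vertex
			{
				break;
			}
			U16 raw;
			memcpy(&raw, data + off, 2);	// Host byte order, as written
			off += 2;
			F32 weight = (F32)raw / 65535.f;
			// ... use 'joint_idx' and 'weight' for this vertex ...
		}
	}
}
#endif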
  1114. std::stringstream ostr;
  1115. for (S32 j = 0; j < vertices; ++j)
  1116. {
  1117. LLVector3 pos(face.mPositions[j].getF32ptr());
  1118. weight_list& weights =
  1119. model[idx]->getJointInfluences(pos);
  1120. S32 count = 0;
  1121. for (weight_list::iterator iter = weights.begin();
  1122. iter != weights.end(); ++iter)
  1123. {
  1124. if (iter->mJointIdx < 255 &&
  1125. iter->mJointIdx >= 0)
  1126. {
  1127. U8 idx = (U8)iter->mJointIdx;
  1128. ostr.write((const char*)&idx, 1);
  1129. U16 influence = (U16)(iter->mWeight * 65535);
  1130. ostr.write((const char*)&influence, 2);
  1131. ++count;
  1132. }
  1133. }
  1134. U8 end_list = 0xFF;
  1135. if (count < 4)
  1136. {
  1137. ostr.write((const char*)&end_list, 1);
  1138. }
  1139. }
  1140. // Copy ostr to binary buffer
  1141. std::string data = ostr.str();
  1142. const U8* buff = (U8*)data.data();
  1143. U32 bytes = data.size();
  1144. LLSD::Binary w(bytes);
  1145. for (U32 j = 0; j < bytes; ++j)
  1146. {
  1147. w[j] = buff[j];
  1148. }
  1149. mdl[model_names[idx]][i]["Weights"] = w;
  1150. }
  1151. else if (idx != LLModel::LOD_PHYSICS)
  1152. {
  1153. llwarns << "Attempting to use skinning without having skin weights"
  1154. << llendl;
  1155. }
  1156. }
  1157. }
  1158. }
  1159. return writeModelToStream(ostr, mdl, nowrite, as_slm);
  1160. }
  1161. LLSD LLModel::writeModelToStream(std::ostream& ostr, LLSD& mdl, bool nowrite,
  1162. bool as_slm)
  1163. {
  1164. std::string::size_type cur_offset = 0;
  1165. LLSD header;
  1166. if (as_slm && mdl.has("material_list"))
  1167. {
  1168. // Save material binding names to header
  1169. header["material_list"] = mdl["material_list"];
  1170. }
  1171. std::string skin;
  1172. if (mdl.has("skin"))
  1173. {
  1174. // write out skin block
  1175. skin = zip_llsd(mdl["skin"]);
  1176. U32 size = skin.size();
  1177. if (size > 0)
  1178. {
  1179. header["skin"]["offset"] = (LLSD::Integer)cur_offset;
  1180. header["skin"]["size"] = (LLSD::Integer)size;
  1181. cur_offset += size;
  1182. }
  1183. }
  1184. std::string decomposition;
  1185. if (mdl.has("physics_convex"))
  1186. {
  1187. // Write out convex decomposition
  1188. decomposition = zip_llsd(mdl["physics_convex"]);
  1189. U32 size = decomposition.size();
  1190. if (size > 0)
  1191. {
  1192. header["physics_convex"]["offset"] = (LLSD::Integer)cur_offset;
  1193. header["physics_convex"]["size"] = (LLSD::Integer)size;
  1194. cur_offset += size;
  1195. }
  1196. }
  1197. if (mdl.has("submodel_id"))
  1198. {
  1199. // Write out submodel id
  1200. header["submodel_id"] = (LLSD::Integer)mdl["submodel_id"];
  1201. }
  1202. std::string out[MODEL_NAMES_LENGTH];
  1203. for (S32 i = 0; i < MODEL_NAMES_LENGTH; i++)
  1204. {
  1205. if (mdl.has(model_names[i]))
  1206. {
  1207. out[i] = zip_llsd(mdl[model_names[i]]);
  1208. U32 size = out[i].size();
  1209. header[model_names[i]]["offset"] = (LLSD::Integer)cur_offset;
  1210. header[model_names[i]]["size"] = (LLSD::Integer)size;
  1211. cur_offset += size;
  1212. }
  1213. }
  1214. if (!nowrite)
  1215. {
  1216. LLSDSerialize::toBinary(header, ostr);
  1217. if (!skin.empty())
  1218. {
  1219. // Write skin block
  1220. ostr.write((const char*)skin.data(),
  1221. header["skin"]["size"].asInteger());
  1222. }
  1223. if (!decomposition.empty())
  1224. {
  1225. // Write decomposition block
  1226. ostr.write((const char*)decomposition.data(),
  1227. header["physics_convex"]["size"].asInteger());
  1228. }
  1229. for (S32 i = 0; i < MODEL_NAMES_LENGTH; i++)
  1230. {
  1231. if (!out[i].empty())
  1232. {
  1233. ostr.write((const char*)out[i].data(),
  1234. header[model_names[i]]["size"].asInteger());
  1235. }
  1236. }
  1237. }
  1238. return header;
  1239. }
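// Resulting stream layout: a binary LLSD header, immediately followed by the
// zlib-deflated blocks in the order written above; each header[name]["offset"]
// is relative to the end of the header and each ["size"] is the deflated block
// size (see how loadModel(), loadSkinInfo() and loadDecomposition() below seek
// with std::ios_base::cur after parsing the header).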
  1240. LLModel::weight_list& LLModel::getJointInfluences(const LLVector3& pos)
  1241. {
  1242. // 1. If a vertex has been weighted then we will find it via pos and return
  1243. // its weight list
  1244. for (weight_map::iterator it = mSkinWeights.begin(),
  1245. end = mSkinWeights.end();
  1246. it != end; ++it)
  1247. {
  1248. if (jointPositionalLookup(it->first, pos))
  1249. {
  1250. return it->second;
  1251. }
  1252. }
  1253. // 2. Otherwise we will use the older implementation
  1254. weight_map::iterator iter = mSkinWeights.find(pos);
  1255. if (iter != mSkinWeights.end())
  1256. {
  1257. if ((iter->first - pos).length() <= 0.1f)
  1258. {
  1259. return iter->second;
  1260. }
  1261. llwarns << "Could not find weight list for matching joint ! This is an error !"
  1262. << llendl;
  1263. llassert(false);
  1264. // For release viewers, fall back to something acceptable instead
  1265. // of crashing...
  1266. }
  1267. // No exact match found, get closest point
  1268. constexpr F32 epsilon = 1e-5f;
  1269. weight_map::iterator iter_down;
  1270. weight_map::iterator iter_up = mSkinWeights.lower_bound(pos);
  1271. if (iter_up == mSkinWeights.end())
  1272. {
  1273. iter_down = iter_up--;
  1274. }
  1275. else
  1276. {
  1277. iter_down = ++iter_up;
  1278. }
  1279. weight_map::iterator best = iter_up;
  1280. F32 min_dist = (best->first - pos).length();
  1281. // Search up and down mSkinWeights from lower bound of pos until a match is
  1282. // found within epsilon. If no match is found within epsilon, return
  1283. // closest match.
  1284. bool done = false;
  1285. while (!done)
  1286. {
  1287. done = true;
  1288. if (iter_up != mSkinWeights.end() && ++iter_up != mSkinWeights.end())
  1289. {
  1290. done = false;
  1291. F32 dist = (iter_up->first - pos).length();
  1292. if (dist < epsilon)
  1293. {
  1294. return iter_up->second;
  1295. }
  1296. if (dist < min_dist)
  1297. {
  1298. best = iter_up;
  1299. min_dist = dist;
  1300. }
  1301. }
  1302. if (iter_down != mSkinWeights.begin() &&
  1303. --iter_down != mSkinWeights.begin())
  1304. {
  1305. done = false;
  1306. F32 dist = (iter_down->first - pos).length();
  1307. if (dist < epsilon)
  1308. {
  1309. return iter_down->second;
  1310. }
  1311. if (dist < min_dist)
  1312. {
  1313. best = iter_down;
  1314. min_dist = dist;
  1315. }
  1316. }
  1317. }
  1318. return best->second;
  1319. }
  1320. void LLModel::setConvexHullDecomposition(const LLModel::hull_decomp& decomp)
  1321. {
  1322. mPhysics.mHull = decomp;
  1323. mPhysics.mMesh.clear();
  1324. updateHullCenters();
  1325. }
  1326. void LLModel::updateHullCenters()
  1327. {
  1328. mHullCenter.resize(mPhysics.mHull.size());
  1329. mHullPoints = 0;
  1330. mCenterOfHullCenters.clear();
  1331. for (U32 i = 0, count = mPhysics.mHull.size(); i < count; ++i)
  1332. {
  1333. U32 count2 = mPhysics.mHull[i].size();
  1334. LLVector3 cur_center;
  1335. for (U32 j = 0; j < count2; ++j)
  1336. {
  1337. cur_center += mPhysics.mHull[i][j];
  1338. }
  1339. mCenterOfHullCenters += cur_center;
  1340. cur_center *= 1.f / count2;
  1341. mHullCenter[i] = cur_center;
  1342. mHullPoints += count2;
  1343. }
  1344. if (mHullPoints > 0)
  1345. {
  1346. mCenterOfHullCenters *= 1.f / mHullPoints;
  1347. llassert(mPhysics.hasHullList());
  1348. }
  1349. }
  1350. bool LLModel::loadModel(std::istream& is)
  1351. {
  1352. mSculptLevel = -1; // default is that an error occurred
  1353. LLSD header;
  1354. {
  1355. if (!LLSDSerialize::fromBinary(header, is, 1024 * 1024 * 1024))
  1356. {
  1357. llwarns << "Mesh header parse error. Not a valid mesh asset !"
  1358. << llendl;
  1359. return false;
  1360. }
  1361. }
  1362. if (header.has("material_list"))
  1363. {
  1364. // Load material list names
  1365. mMaterialList.clear();
  1366. for (S32 i = 0, count = header["material_list"].size(); i < count; ++i)
  1367. {
  1368. mMaterialList.emplace_back(header["material_list"][i].asString());
  1369. }
  1370. }
  1371. mSubmodelID = header.has("submodel_id") ? header["submodel_id"].asInteger()
  1372. : 0;
  1373. // 4 mesh LODs (from 0 to 3) + 1 physical (4)
  1374. constexpr S32 MODEL_MAX_LOD = 4;
  1375. S32 lod = llclamp((S32)mDetail, 0, MODEL_MAX_LOD);
  1376. if (header[model_names[lod]]["offset"].asInteger() == -1 ||
  1377. header[model_names[lod]]["size"].asInteger() == 0)
  1378. {
  1379. // Cannot load requested LOD
  1380. llwarns << "LoD data is invalid !" << llendl;
  1381. return false;
  1382. }
  1383. bool has_skin = header["skin"]["offset"].asInteger() >= 0 &&
  1384. header["skin"]["size"].asInteger() > 0;
  1385. if (lod == LLModel::LOD_HIGH && !mSubmodelID)
  1386. {
  1387. // Try to load skin info and decomp info
  1388. std::ios::pos_type cur_pos = is.tellg();
  1389. loadSkinInfo(header, is);
  1390. is.seekg(cur_pos);
  1391. }
  1392. if ((lod == LLModel::LOD_HIGH || lod == LLModel::LOD_PHYSICS) &&
  1393. !mSubmodelID)
  1394. {
  1395. std::ios::pos_type cur_pos = is.tellg();
  1396. loadDecomposition(header, is);
  1397. is.seekg(cur_pos);
  1398. }
  1399. is.seekg(header[model_names[lod]]["offset"].asInteger(), std::ios_base::cur);
  1400. if (unpackVolumeFaces(is, header[model_names[lod]]["size"].asInteger()))
  1401. {
  1402. if (has_skin)
  1403. {
  1404. // Build out mSkinWeight from face info
  1405. for (S32 i = 0; i < getNumVolumeFaces(); ++i)
  1406. {
  1407. const LLVolumeFace& face = getVolumeFace(i);
  1408. if (face.mWeights)
  1409. {
  1410. for (S32 j = 0; j < face.mNumVertices; ++j)
  1411. {
  1412. LLVector4a& w = face.mWeights[j];
  1413. std::vector<JointWeight> wght;
  1414. for (S32 k = 0; k < 4; ++k)
  1415. {
  1416. S32 idx = (S32)w[k];
  1417. F32 f = w[k] - idx;
  1418. if (f > 0.f)
  1419. {
  1420. wght.emplace_back(idx, f);
  1421. }
  1422. }
  1423. if (!wght.empty())
  1424. {
  1425. LLVector3 pos(face.mPositions[j].getF32ptr());
  1426. mSkinWeights[pos] = wght;
  1427. }
  1428. }
  1429. }
  1430. }
  1431. }
  1432. return true;
  1433. }
  1434. else
  1435. {
  1436. llwarns << "Volume faces unpacking failed !" << llendl;
  1437. }
  1438. return false;
  1439. }
  1440. bool LLModel::isMaterialListSubset(LLModel* ref)
  1441. {
  1442. if (!ref) return false;
  1443. U32 model_count = mMaterialList.size();
  1444. U32 ref_count = ref->mMaterialList.size();
  1445. if (model_count > ref_count)
  1446. {
  1447. // This model cannot be a strict subset if it has more materials
  1448. // than the reference.
  1449. return false;
  1450. }
  1451. for (U32 src = 0; src < model_count; ++src)
  1452. {
  1453. bool found = false;
  1454. for (U32 dst = 0; dst < ref_count; ++dst)
  1455. {
  1456. found = mMaterialList[src] == ref->mMaterialList[dst];
  1457. if (found)
  1458. {
  1459. break;
  1460. }
  1461. }
  1462. if (!found)
  1463. {
  1464. llwarns << "Could not find material " << mMaterialList[src]
  1465. << " in reference model " << ref->mLabel << llendl;
  1466. return false;
  1467. }
  1468. }
  1469. return true;
  1470. }
  1471. #if 0 // Not used
  1472. bool LLModel::needToAddFaces(LLModel* ref, S32& ref_face_cnt,
  1473. S32& mdl_face_cnt)
  1474. {
  1475. bool changed = false;
  1476. if (ref_face_cnt < mdl_face_cnt)
  1477. {
  1478. ref_face_cnt += mdl_face_cnt - ref_face_cnt;
  1479. changed = true;
  1480. }
  1481. else if (mdl_face_cnt < ref_face_cnt)
  1482. {
  1483. mdl_face_cnt += ref_face_cnt - mdl_face_cnt;
  1484. changed = true;
  1485. }
  1486. return changed;
  1487. }
  1488. #endif

#if 0	// Moved to llfloatermodelpreview.cpp
bool LLModel::matchMaterialOrder(LLModel* ref, S32& ref_face_cnt,
								 S32& mdl_face_cnt)
{
	// Is this a subset ?
	// LODs cannot currently add new materials, e.g.
	// 1. ref = a,b,c lod1 = d,e => This is not permitted
	// 2. ref = a,b,c lod1 = c   => This would be permitted
	if (!isMaterialListSubset(ref))
	{
		llinfos << "Material of model is not a subset of reference." << llendl;
		return false;
	}

	if (mMaterialList.size() > ref->mMaterialList.size())
	{
		// We passed isMaterialListSubset, so materials are a subset, but a
		// subset is not supposed to be larger than original and if we keep
		// going, reordering will cause a crash.
		llinfos << "Material of model has more materials than a reference."
				<< llendl;
		return false;
	}

	std::map<std::string, U32> index_map;

	// Build a map of material slot names to face indexes
	bool reorder = false;

	std::set<std::string> base_mat;
	std::set<std::string> cur_mat;

	for (U32 i = 0; i < mMaterialList.size(); ++i)
	{
		index_map[ref->mMaterialList[i]] = i;
		// If any material name does not match reference, we need to reorder
		reorder |= ref->mMaterialList[i] != mMaterialList[i];
		base_mat.insert(ref->mMaterialList[i]);
		cur_mat.insert(mMaterialList[i]);
	}

	if (reorder &&
		// Do not reorder if material name sets do not match
		base_mat == cur_mat)
	{
		std::vector<LLVolumeFace> new_face_list;
		new_face_list.resize(mVolumeFaces.size());

		std::vector<std::string> new_material_list;
		new_material_list.resize(mMaterialList.size());

		U32 faces_count = mVolumeFaces.size();

		// Rebuild face list so materials have the same order as the reference
		// model
		for (U32 i = 0, count = mMaterialList.size(); i < count; ++i)
		{
			U32 ref_idx = index_map[mMaterialList[i]];
			if (i < faces_count)
			{
				new_face_list[ref_idx] = mVolumeFaces[i];
			}
			new_material_list[ref_idx] = mMaterialList[i];
		}

		llassert(new_material_list == ref->mMaterialList);

		mVolumeFaces = new_face_list;
	}

	// Override material list with reference model ordering
	mMaterialList = ref->mMaterialList;

	return true;
}
#endif
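
// Seeks to the "skin" block referenced by the mesh asset header, inflates it
// with unzip_llsd() and populates mSkinInfo. Returns false when the header
// advertises no skin block or when decompression fails.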
bool LLModel::loadSkinInfo(const LLSD& header, std::istream& is)
{
	S32 offset = header["skin"]["offset"].asInteger();
	S32 size = header["skin"]["size"].asInteger();

	if (offset >= 0 && size > 0)
	{
		is.seekg(offset, std::ios_base::cur);

		LLSD skin_data;
		if (unzip_llsd(skin_data, is, size))
		{
			mSkinInfo.fromLLSD(skin_data);
			return true;
		}
	}

	return false;
}
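
// Seeks to the "physics_convex" block referenced by the mesh asset header,
// inflates it and populates mPhysics (convex decomposition and hull centers).
// Note that a missing or unreadable block is not treated as an error here.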
bool LLModel::loadDecomposition(const LLSD& header, std::istream& is)
{
	S32 offset = header["physics_convex"]["offset"].asInteger();
	S32 size = header["physics_convex"]["size"].asInteger();

	if (offset >= 0 && size > 0 && !mSubmodelID)
	{
		is.seekg(offset, std::ios_base::cur);

		LLSD data;
		if (unzip_llsd(data, is, size))
		{
			mPhysics.fromLLSD(data);
			updateHullCenters();
		}
	}

	return true;
}

LLModel::Decomposition::Decomposition(const LLSD& data)
{
	fromLLSD(data);
}

LLModel::Decomposition::Decomposition(const LLSD& data, const LLUUID& mesh_id)
:	mMeshID(mesh_id)
{
	fromLLSD(data);
}
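
// Decodes a convex decomposition from its LLSD form: "HullList" gives the
// number of points in each hull (0 meaning 256), while "Positions" and
// "BoundingVerts" hold 16-bit quantized coordinates that are mapped back into
// the ["Min", "Max"] domain (defaulting to a [-0.5, 0.5] cube) as:
//   pos[k] = p[k] / 65535 * (max[k] - min[k]) + min[k]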
void LLModel::Decomposition::fromLLSD(const LLSD& decomp)
{
	if (decomp.has("HullList") && decomp.has("Positions"))
	{
		const LLSD::Binary& hulls = decomp["HullList"].asBinary();
		const LLSD::Binary& position = decomp["Positions"].asBinary();

		U16* p = (U16*)&position[0];

		mHull.resize(hulls.size());

		LLVector3 min;
		LLVector3 max;
		LLVector3 range;

		if (decomp.has("Min"))
		{
			min.setValue(decomp["Min"]);
			max.setValue(decomp["Max"]);
		}
		else
		{
			min.set(-0.5f, -0.5f, -0.5f);
			max.set(0.5f, 0.5f, 0.5f);
		}

		range = max - min;

		for (U32 i = 0; i < hulls.size(); ++i)
		{
			U16 count = hulls[i] == 0 ? 256 : hulls[i];

			std::set<U64> valid;

			// Each hull must contain at least 4 unique points
			for (U32 j = 0; j < count; ++j)
			{
				U64 test = (U64)p[0] | ((U64)p[1] << 16) | ((U64)p[2] << 32);
				// Point must be unique
				//llassert(valid.find(test) == valid.end());
				valid.insert(test);

				mHull[i].emplace_back((F32)p[0] / 65535.f * range.mV[0] + min.mV[0],
									  (F32)p[1] / 65535.f * range.mV[1] + min.mV[1],
									  (F32)p[2] / 65535.f * range.mV[2] + min.mV[2]);
				p += 3;
			}
		}
	}

	if (decomp.has("BoundingVerts"))
	{
		const LLSD::Binary& position = decomp["BoundingVerts"].asBinary();

		U16* p = (U16*)&position[0];

		LLVector3 min;
		LLVector3 max;
		LLVector3 range;

		if (decomp.has("Min"))
		{
			min.setValue(decomp["Min"]);
			max.setValue(decomp["Max"]);
		}
		else
		{
			min.set(-0.5f, -0.5f, -0.5f);
			max.set(0.5f, 0.5f, 0.5f);
		}

		range = max - min;

		U32 count = position.size() / 6;
		for (U32 j = 0; j < count; ++j)
		{
			mBaseHull.emplace_back((F32)p[0] / 65535.f * range.mV[0] + min.mV[0],
								   (F32)p[1] / 65535.f * range.mV[1] + min.mV[1],
								   (F32)p[2] / 65535.f * range.mV[2] + min.mV[2]);
			p += 3;
		}
	}
	else
	{
		// Empty base hull mesh to indicate decomposition has been loaded but
		// contains no base hull
		mBaseHullMesh.clear();
	}
}

bool LLModel::Decomposition::hasHullList() const
{
	return !mHull.empty();
}
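
// Serializes the decomposition back to LLSD, performing the inverse of the
// dequantization done in fromLLSD(): each coordinate is normalized to the
// computed [Min, Max] domain and stored as a 16-bit integer (two bytes per
// component, six bytes per point).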
LLSD LLModel::Decomposition::asLLSD() const
{
	LLSD ret;

	if (mBaseHull.empty() && mHull.empty())
	{
		// Nothing to write
		return ret;
	}

	// Write decomposition block
	// ["physics_convex"]["HullList"] -- list of 8-bit integers, each entry
	// represents a hull with the specified number of points
	// ["physics_convex"]["Positions"] -- list of 16-bit integers to be
	// decoded to the given domain, encoded 3D points
	// ["physics_convex"]["BoundingVerts"] -- list of 16-bit integers to be
	// decoded to the given domain, encoded 3D points representing a single
	// hull approximation of the given shape

	// Get minimum and maximum
	LLVector3 min;
	if (mHull.empty())
	{
		min = mBaseHull[0];
	}
	else
	{
		min = mHull[0][0];
	}
	LLVector3 max = min;

	LLSD::Binary hulls(mHull.size());

	U32 total = 0;
	for (U32 i = 0; i < mHull.size(); ++i)
	{
		U32 size = mHull[i].size();
		total += size;
		hulls[i] = (U8)size;

		for (U32 j = 0; j < mHull[i].size(); ++j)
		{
			update_min_max(min, max, mHull[i][j]);
		}
	}

	for (U32 i = 0; i < mBaseHull.size(); ++i)
	{
		update_min_max(min, max, mBaseHull[i]);
	}

	ret["Min"] = min.getValue();
	ret["Max"] = max.getValue();

	LLVector3 range = max - min;

	if (!hulls.empty())
	{
		ret["HullList"] = hulls;
	}

	if (total > 0)
	{
		LLSD::Binary p(total * 6);

		U32 vert_idx = 0;
		for (U32 i = 0; i < mHull.size(); ++i)
		{
			std::set<U64> valid;

			llassert(!mHull[i].empty());

			for (U32 j = 0; j < mHull[i].size(); ++j)
			{
				U64 test = 0;
				const F32* src = mHull[i][j].mV;

				for (U32 k = 0; k < 3; k++)
				{
					// Convert to 16-bit normalized across domain
					U16 val =
						(U16)(((src[k] - min.mV[k]) / range.mV[k]) * 65535);

					if (valid.size() < 3)
					{
						switch (k)
						{
							case 0: test = test | (U64)val; break;
							case 1: test = test | ((U64)val << 16); break;
							case 2: test = test | ((U64)val << 32); break;
						};
						valid.insert(test);
					}

					// Write to binary buffer
					U8* buff = (U8*)&val;
					p[vert_idx++] = buff[0];
					p[vert_idx++] = buff[1];

					// Make sure we have not run off the end of the array
					llassert(vert_idx <= p.size());
				}
			}

			// Must have at least 3 unique points
			llassert(valid.size() > 2);
		}

		ret["Positions"] = p;
	}

	if (!mBaseHull.empty())
	{
		LLSD::Binary p(mBaseHull.size() * 6);

		U32 vert_idx = 0;
		for (U32 j = 0; j < mBaseHull.size(); ++j)
		{
			const F32* v = mBaseHull[j].mV;

			for (U32 k = 0; k < 3; k++)
			{
				// Convert to 16-bit normalized across domain
				U16 val = (U16)(((v[k] - min.mV[k]) / range.mV[k]) * 65535);

				// Write to binary buffer
				U8* buff = (U8*)&val;
				p[vert_idx++] = buff[0];
				p[vert_idx++] = buff[1];

				if (vert_idx > p.size())
				{
					llerrs << "Index out of bounds" << llendl;
				}
			}
		}

		ret["BoundingVerts"] = p;
	}

	return ret;
}
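
// Fills in whatever parts of this decomposition are still empty (hulls, base
// hull, meshes, physics shape mesh) from the passed decomposition, which must
// belong to the same mesh.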
void LLModel::Decomposition::merge(const LLModel::Decomposition* rhs)
{
	if (!rhs)
	{
		return;
	}

	if (mMeshID != rhs->mMeshID)
	{
		llerrs << "Attempted to merge with decomposition of some other mesh."
			   << llendl;
	}

	if (mBaseHull.empty())
	{
		// Take base hull and decomposition from rhs
		mHull = rhs->mHull;
		mBaseHull = rhs->mBaseHull;
		mMesh = rhs->mMesh;
		mBaseHullMesh = rhs->mBaseHullMesh;
	}

	if (mPhysicsShapeMesh.empty())
	{
		// Take physics shape mesh from rhs
		mPhysicsShapeMesh = rhs->mPhysicsShapeMesh;
	}
}
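
// Rebuilds a model instance from the LLSD produced by asLLSD() below: local
// mesh Id, label, transform and the per-binding import materials.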
LLModelInstance::LLModelInstance(const LLSD& data)
:	LLModelInstanceBase()
{
	mLocalMeshID = data["mesh_id"].asInteger();
	mLabel = data["label"].asString();
	mTransform.setValue(data["transform"]);

	for (U32 i = 0, count = data["material"].size(); i < count; ++i)
	{
		LLImportMaterial mat(data["material"][i]);
		mMaterial[mat.mBinding] = mat;
	}
}

LLSD LLModelInstance::asLLSD()
{
	LLSD ret;

	ret["mesh_id"] = mModel->mLocalID;
	ret["label"] = mLabel;
	ret["transform"] = mTransform.getValue();

	U32 i = 0;
	for (std::map<std::string, LLImportMaterial>::iterator
			iter = mMaterial.begin(), end = mMaterial.end();
		 iter != end; ++iter)
	{
		ret["material"][i++] = iter->second.asLLSD();
	}

	return ret;
}
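
// Import material (de)serialization: only the diffuse map file name and
// label, the diffuse color, the fullbright flag and the material binding are
// carried through LLSD.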
LLImportMaterial::LLImportMaterial(const LLSD& data)
{
	mDiffuseMapFilename = data["diffuse"]["filename"].asString();
	mDiffuseMapLabel = data["diffuse"]["label"].asString();
	mDiffuseColor.setValue(data["diffuse"]["color"]);
	mFullbright = data["fullbright"].asBoolean();
	mBinding = data["binding"].asString();
}

LLSD LLImportMaterial::asLLSD()
{
	LLSD ret;

	ret["diffuse"]["filename"] = mDiffuseMapFilename;
	ret["diffuse"]["label"] = mDiffuseMapLabel;
	ret["diffuse"]["color"] = mDiffuseColor.getValue();
	ret["fullbright"] = mFullbright;
	ret["binding"] = mBinding;

	return ret;
}
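
// Strict weak ordering on import materials so that they can be used as keys
// in sorted containers; compares members in a fixed order, mFullbright last.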
bool LLImportMaterial::operator<(const LLImportMaterial& rhs) const
{
	if (mDiffuseMapID != rhs.mDiffuseMapID)
	{
		return mDiffuseMapID < rhs.mDiffuseMapID;
	}

	if (mDiffuseMapFilename != rhs.mDiffuseMapFilename)
	{
		return mDiffuseMapFilename < rhs.mDiffuseMapFilename;
	}

	if (mDiffuseMapLabel != rhs.mDiffuseMapLabel)
	{
		return mDiffuseMapLabel < rhs.mDiffuseMapLabel;
	}

	if (mDiffuseColor != rhs.mDiffuseColor)
	{
		return mDiffuseColor < rhs.mDiffuseColor;
	}

	if (mBinding != rhs.mBinding)
	{
		return mBinding < rhs.mBinding;
	}

	return mFullbright < rhs.mFullbright;
}